aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/fpga
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/fpga')
-rw-r--r--drivers/fpga/Kconfig68
-rw-r--r--drivers/fpga/Makefile14
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c463
-rw-r--r--drivers/fpga/dfl-afu-main.c636
-rw-r--r--drivers/fpga/dfl-afu-region.c166
-rw-r--r--drivers/fpga/dfl-afu.h100
-rw-r--r--drivers/fpga/dfl-fme-br.c114
-rw-r--r--drivers/fpga/dfl-fme-main.c279
-rw-r--r--drivers/fpga/dfl-fme-mgr.c349
-rw-r--r--drivers/fpga/dfl-fme-pr.c479
-rw-r--r--drivers/fpga/dfl-fme-pr.h84
-rw-r--r--drivers/fpga/dfl-fme-region.c89
-rw-r--r--drivers/fpga/dfl-fme.h38
-rw-r--r--drivers/fpga/dfl-pci.c243
-rw-r--r--drivers/fpga/dfl.c1044
-rw-r--r--drivers/fpga/dfl.h410
-rw-r--r--drivers/fpga/fpga-mgr.c28
-rw-r--r--drivers/fpga/fpga-region.c22
18 files changed, 4626 insertions, 0 deletions
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index ee9c5420c47f..1ebcef4bab5b 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -130,4 +130,72 @@ config OF_FPGA_REGION
130 Support for loading FPGA images by applying a Device Tree 130 Support for loading FPGA images by applying a Device Tree
131 overlay. 131 overlay.
132 132
133config FPGA_DFL
134 tristate "FPGA Device Feature List (DFL) support"
135 select FPGA_BRIDGE
136 select FPGA_REGION
137 help
138 Device Feature List (DFL) defines a feature list structure that
139 creates a linked list of feature headers within the MMIO space
140 to provide an extensible way of adding features for FPGA.
141 Driver can walk through the feature headers to enumerate feature
142 devices (e.g. FPGA Management Engine, Port and Accelerator
143 Function Unit) and their private features for target FPGA devices.
144
145 Select this option to enable common support for Field-Programmable
146 Gate Array (FPGA) solutions which implement Device Feature List.
147 It provides enumeration APIs and feature device infrastructure.
148
149config FPGA_DFL_FME
150 tristate "FPGA DFL FME Driver"
151 depends on FPGA_DFL
152 help
153 The FPGA Management Engine (FME) is a feature device implemented
154 under Device Feature List (DFL) framework. Select this option to
155 enable the platform device driver for FME which implements all
156 FPGA platform level management features. There shall be one FME
157 per DFL based FPGA device.
158
159config FPGA_DFL_FME_MGR
160 tristate "FPGA DFL FME Manager Driver"
161 depends on FPGA_DFL_FME && HAS_IOMEM
162 help
163 Say Y to enable FPGA Manager driver for FPGA Management Engine.
164
165config FPGA_DFL_FME_BRIDGE
166 tristate "FPGA DFL FME Bridge Driver"
167 depends on FPGA_DFL_FME && HAS_IOMEM
168 help
169 Say Y to enable FPGA Bridge driver for FPGA Management Engine.
170
171config FPGA_DFL_FME_REGION
172 tristate "FPGA DFL FME Region Driver"
173 depends on FPGA_DFL_FME && HAS_IOMEM
174 help
175 Say Y to enable FPGA Region driver for FPGA Management Engine.
176
177config FPGA_DFL_AFU
178 tristate "FPGA DFL AFU Driver"
179 depends on FPGA_DFL
180 help
181 This is the driver for FPGA Accelerated Function Unit (AFU) which
182 implements AFU and Port management features. A User AFU connects
183 to the FPGA infrastructure via a Port. There may be more than one
184 Port/AFU per DFL based FPGA device.
185
186config FPGA_DFL_PCI
187 tristate "FPGA DFL PCIe Device Driver"
188 depends on PCI && FPGA_DFL
189 help
190 Select this option to enable PCIe driver for PCIe-based
191 Field-Programmable Gate Array (FPGA) solutions which implement
192 the Device Feature List (DFL). This driver provides interfaces
193 for userspace applications to configure, enumerate, open and access
194 FPGA accelerators on the FPGA DFL devices, enables system level
195 management functions such as FPGA partial reconfiguration, power
196 management and virtualization with DFL framework and DFL feature
197 device drivers.
198
199 To compile this as a module, choose M here.
200
133endif # FPGA 201endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index f9803dad6919..7a2d73ba7122 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -28,3 +28,17 @@ obj-$(CONFIG_XILINX_PR_DECOUPLER) += xilinx-pr-decoupler.o
28# High Level Interfaces 28# High Level Interfaces
29obj-$(CONFIG_FPGA_REGION) += fpga-region.o 29obj-$(CONFIG_FPGA_REGION) += fpga-region.o
30obj-$(CONFIG_OF_FPGA_REGION) += of-fpga-region.o 30obj-$(CONFIG_OF_FPGA_REGION) += of-fpga-region.o
31
32# FPGA Device Feature List Support
33obj-$(CONFIG_FPGA_DFL) += dfl.o
34obj-$(CONFIG_FPGA_DFL_FME) += dfl-fme.o
35obj-$(CONFIG_FPGA_DFL_FME_MGR) += dfl-fme-mgr.o
36obj-$(CONFIG_FPGA_DFL_FME_BRIDGE) += dfl-fme-br.o
37obj-$(CONFIG_FPGA_DFL_FME_REGION) += dfl-fme-region.o
38obj-$(CONFIG_FPGA_DFL_AFU) += dfl-afu.o
39
40dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o
41dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
42
43# Drivers for FPGAs which implement DFL
44obj-$(CONFIG_FPGA_DFL_PCI) += dfl-pci.o
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
new file mode 100644
index 000000000000..0e81d33af856
--- /dev/null
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -0,0 +1,463 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Accelerated Function Unit (AFU) DMA Region Management
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Wu Hao <hao.wu@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 */
11
12#include <linux/dma-mapping.h>
13#include <linux/sched/signal.h>
14#include <linux/uaccess.h>
15
16#include "dfl-afu.h"
17
/* Drop the reference taken on each non-NULL entry of @pages (npages slots). */
static void put_all_pages(struct page **pages, int npages)
{
	struct page **p = pages;
	struct page **end = pages + npages;

	for (; p < end; p++) {
		if (*p)
			put_page(*p);
	}
}
26
/* Initialize the per-AFU rbtree that tracks user DMA regions (empty tree). */
void afu_dma_region_init(struct dfl_feature_platform_data *pdata)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);

	afu->dma_regions = RB_ROOT;
}
33
34/**
35 * afu_dma_adjust_locked_vm - adjust locked memory
36 * @dev: port device
37 * @npages: number of pages
38 * @incr: increase or decrease locked memory
39 *
40 * Increase or decrease the locked memory size with npages input.
41 *
42 * Return 0 on success.
43 * Return -ENOMEM if locked memory size is over the limit and no CAP_IPC_LOCK.
44 */
45static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
46{
47 unsigned long locked, lock_limit;
48 int ret = 0;
49
50 /* the task is exiting. */
51 if (!current->mm)
52 return 0;
53
54 down_write(&current->mm->mmap_sem);
55
56 if (incr) {
57 locked = current->mm->locked_vm + npages;
58 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
59
60 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
61 ret = -ENOMEM;
62 else
63 current->mm->locked_vm += npages;
64 } else {
65 if (WARN_ON_ONCE(npages > current->mm->locked_vm))
66 npages = current->mm->locked_vm;
67 current->mm->locked_vm -= npages;
68 }
69
70 dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
71 incr ? '+' : '-', npages << PAGE_SHIFT,
72 current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
73 ret ? "- execeeded" : "");
74
75 up_write(&current->mm->mmap_sem);
76
77 return ret;
78}
79
80/**
81 * afu_dma_pin_pages - pin pages of given dma memory region
82 * @pdata: feature device platform data
83 * @region: dma memory region to be pinned
84 *
85 * Pin all the pages of given dfl_afu_dma_region.
86 * Return 0 for success or negative error code.
87 */
88static int afu_dma_pin_pages(struct dfl_feature_platform_data *pdata,
89 struct dfl_afu_dma_region *region)
90{
91 int npages = region->length >> PAGE_SHIFT;
92 struct device *dev = &pdata->dev->dev;
93 int ret, pinned;
94
95 ret = afu_dma_adjust_locked_vm(dev, npages, true);
96 if (ret)
97 return ret;
98
99 region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
100 if (!region->pages) {
101 ret = -ENOMEM;
102 goto unlock_vm;
103 }
104
105 pinned = get_user_pages_fast(region->user_addr, npages, 1,
106 region->pages);
107 if (pinned < 0) {
108 ret = pinned;
109 goto put_pages;
110 } else if (pinned != npages) {
111 ret = -EFAULT;
112 goto free_pages;
113 }
114
115 dev_dbg(dev, "%d pages pinned\n", pinned);
116
117 return 0;
118
119put_pages:
120 put_all_pages(region->pages, pinned);
121free_pages:
122 kfree(region->pages);
123unlock_vm:
124 afu_dma_adjust_locked_vm(dev, npages, false);
125 return ret;
126}
127
/**
 * afu_dma_unpin_pages - unpin pages of given dma memory region
 * @pdata: feature device platform data
 * @region: dma memory region to be unpinned
 *
 * Unpin all the pages of given dfl_afu_dma_region and release the
 * RLIMIT_MEMLOCK accounting taken by afu_dma_pin_pages().
 * (Note: returns nothing; the previous "Return 0 ..." note was wrong.)
 */
static void afu_dma_unpin_pages(struct dfl_feature_platform_data *pdata,
				struct dfl_afu_dma_region *region)
{
	long npages = region->length >> PAGE_SHIFT;
	struct device *dev = &pdata->dev->dev;

	put_all_pages(region->pages, npages);
	kfree(region->pages);
	afu_dma_adjust_locked_vm(dev, npages, false);

	dev_dbg(dev, "%ld pages unpinned\n", npages);
}
148
149/**
150 * afu_dma_check_continuous_pages - check if pages are continuous
151 * @region: dma memory region
152 *
153 * Return true if pages of given dma memory region have continuous physical
154 * address, otherwise return false.
155 */
156static bool afu_dma_check_continuous_pages(struct dfl_afu_dma_region *region)
157{
158 int npages = region->length >> PAGE_SHIFT;
159 int i;
160
161 for (i = 0; i < npages - 1; i++)
162 if (page_to_pfn(region->pages[i]) + 1 !=
163 page_to_pfn(region->pages[i + 1]))
164 return false;
165
166 return true;
167}
168
169/**
170 * dma_region_check_iova - check if memory area is fully contained in the region
171 * @region: dma memory region
172 * @iova: address of the dma memory area
173 * @size: size of the dma memory area
174 *
175 * Compare the dma memory area defined by @iova and @size with given dma region.
176 * Return true if memory area is fully contained in the region, otherwise false.
177 */
178static bool dma_region_check_iova(struct dfl_afu_dma_region *region,
179 u64 iova, u64 size)
180{
181 if (!size && region->iova != iova)
182 return false;
183
184 return (region->iova <= iova) &&
185 (region->length + region->iova >= iova + size);
186}
187
/**
 * afu_dma_region_add - add given dma region to rbtree
 * @pdata: feature device platform data
 * @region: dma region to be added
 *
 * Return 0 for success, -EEXIST if dma region has already been added.
 *
 * Needs to be called with pdata->lock held.
 */
static int afu_dma_region_add(struct dfl_feature_platform_data *pdata,
			      struct dfl_afu_dma_region *region)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node **new, *parent = NULL;

	dev_dbg(&pdata->dev->dev, "add region (iova = %llx)\n",
		(unsigned long long)region->iova);

	/* standard rbtree insertion walk, keyed and ordered by iova */
	new = &afu->dma_regions.rb_node;

	while (*new) {
		struct dfl_afu_dma_region *this;

		this = container_of(*new, struct dfl_afu_dma_region, node);

		parent = *new;

		/* reject any overlap with an existing region, not just equality */
		if (dma_region_check_iova(this, region->iova, region->length))
			return -EEXIST;

		if (region->iova < this->iova)
			new = &((*new)->rb_left);
		else if (region->iova > this->iova)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&region->node, parent, new);
	rb_insert_color(&region->node, &afu->dma_regions);

	return 0;
}
231
/**
 * afu_dma_region_remove - remove given dma region from rbtree
 * @pdata: feature device platform data
 * @region: dma region to be removed
 *
 * Needs to be called with pdata->lock held. The caller still owns @region
 * (this only unlinks it; it does not unmap, unpin or free it).
 */
static void afu_dma_region_remove(struct dfl_feature_platform_data *pdata,
				  struct dfl_afu_dma_region *region)
{
	struct dfl_afu *afu;

	dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
		(unsigned long long)region->iova);

	afu = dfl_fpga_pdata_get_private(pdata);
	rb_erase(&region->node, &afu->dma_regions);
}
250
251/**
252 * afu_dma_region_destroy - destroy all regions in rbtree
253 * @pdata: feature device platform data
254 *
255 * Needs to be called with pdata->lock heold.
256 */
257void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata)
258{
259 struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
260 struct rb_node *node = rb_first(&afu->dma_regions);
261 struct dfl_afu_dma_region *region;
262
263 while (node) {
264 region = container_of(node, struct dfl_afu_dma_region, node);
265
266 dev_dbg(&pdata->dev->dev, "del region (iova = %llx)\n",
267 (unsigned long long)region->iova);
268
269 rb_erase(node, &afu->dma_regions);
270
271 if (region->iova)
272 dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
273 region->iova, region->length,
274 DMA_BIDIRECTIONAL);
275
276 if (region->pages)
277 afu_dma_unpin_pages(pdata, region);
278
279 node = rb_next(node);
280 kfree(region);
281 }
282}
283
/**
 * afu_dma_region_find - find the dma region from rbtree based on iova and size
 * @pdata: feature device platform data
 * @iova: address of the dma memory area
 * @size: size of the dma memory area
 *
 * It finds the dma region from the rbtree based on @iova and @size:
 * - if @size == 0, it finds the dma region which starts from @iova
 * - otherwise, it finds the dma region which fully contains
 *   [@iova, @iova+size)
 * If nothing is matched returns NULL.
 *
 * Needs to be called with pdata->lock held.
 */
struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct rb_node *node = afu->dma_regions.rb_node;
	struct device *dev = &pdata->dev->dev;

	/* binary search the rbtree, which is ordered by region start iova */
	while (node) {
		struct dfl_afu_dma_region *region;

		region = container_of(node, struct dfl_afu_dma_region, node);

		if (dma_region_check_iova(region, iova, size)) {
			dev_dbg(dev, "find region (iova = %llx)\n",
				(unsigned long long)region->iova);
			return region;
		}

		if (iova < region->iova)
			node = node->rb_left;
		else if (iova > region->iova)
			node = node->rb_right;
		else
			/* same start but the requested area is not fully covered */
			break;
	}

	dev_dbg(dev, "region with iova %llx and size %llx is not found\n",
		(unsigned long long)iova, (unsigned long long)size);

	return NULL;
}
330
/**
 * afu_dma_region_find_iova - find the dma region from rbtree by iova
 * @pdata: feature device platform data
 * @iova: address of the dma region
 *
 * Convenience wrapper: size 0 selects "region starting exactly at @iova".
 *
 * Needs to be called with pdata->lock held.
 */
static struct dfl_afu_dma_region *
afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
{
	return afu_dma_region_find(pdata, iova, 0);
}
343
/**
 * afu_dma_map_region - map memory region for dma
 * @pdata: feature device platform data
 * @user_addr: address of the memory region
 * @length: size of the memory region
 * @iova: pointer of iova address
 *
 * Map memory region defined by @user_addr and @length, and return dma address
 * of the memory region via @iova.
 * Return 0 for success, otherwise error code.
 *
 * Pipeline: validate input -> pin user pages -> require physically
 * continuous pages -> dma_map_page() the run -> record in the rbtree.
 * Each failure unwinds exactly the steps already performed.
 */
int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
		       u64 user_addr, u64 length, u64 *iova)
{
	struct dfl_afu_dma_region *region;
	int ret;

	/*
	 * Check Inputs, only accept page-aligned user memory region with
	 * valid length.
	 */
	if (!PAGE_ALIGNED(user_addr) || !PAGE_ALIGNED(length) || !length)
		return -EINVAL;

	/* Check overflow */
	if (user_addr + length < user_addr)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, (void __user *)(unsigned long)user_addr,
		       length))
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->user_addr = user_addr;
	region->length = length;

	/* Pin the user memory region */
	ret = afu_dma_pin_pages(pdata, region);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to pin memory region\n");
		goto free_region;
	}

	/* Only accept continuous pages, return error else */
	if (!afu_dma_check_continuous_pages(region)) {
		dev_err(&pdata->dev->dev, "pages are not continuous\n");
		ret = -EINVAL;
		goto unpin_pages;
	}

	/*
	 * As pages are continuous then start to do DMA mapping; one
	 * dma_map_page() of the first page covers the whole run.
	 */
	region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
				    region->pages[0], 0,
				    region->length,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdata->dev->dev, region->iova)) {
		dev_err(&pdata->dev->dev, "failed to map for dma\n");
		ret = -EFAULT;
		goto unpin_pages;
	}

	*iova = region->iova;

	mutex_lock(&pdata->lock);
	ret = afu_dma_region_add(pdata, region);
	mutex_unlock(&pdata->lock);
	if (ret) {
		dev_err(&pdata->dev->dev, "failed to add dma region\n");
		goto unmap_dma;
	}

	return 0;

unmap_dma:
	dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
		       region->iova, region->length, DMA_BIDIRECTIONAL);
unpin_pages:
	afu_dma_unpin_pages(pdata, region);
free_region:
	kfree(region);
	return ret;
}
429
430/**
431 * afu_dma_unmap_region - unmap dma memory region
432 * @pdata: feature device platform data
433 * @iova: dma address of the region
434 *
435 * Unmap dma memory region based on @iova.
436 * Return 0 for success, otherwise error code.
437 */
438int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
439{
440 struct dfl_afu_dma_region *region;
441
442 mutex_lock(&pdata->lock);
443 region = afu_dma_region_find_iova(pdata, iova);
444 if (!region) {
445 mutex_unlock(&pdata->lock);
446 return -EINVAL;
447 }
448
449 if (region->in_use) {
450 mutex_unlock(&pdata->lock);
451 return -EBUSY;
452 }
453
454 afu_dma_region_remove(pdata, region);
455 mutex_unlock(&pdata->lock);
456
457 dma_unmap_page(dfl_fpga_pdata_to_parent(pdata),
458 region->iova, region->length, DMA_BIDIRECTIONAL);
459 afu_dma_unpin_pages(pdata, region);
460 kfree(region);
461
462 return 0;
463}
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
new file mode 100644
index 000000000000..02baa6a227c0
--- /dev/null
+++ b/drivers/fpga/dfl-afu-main.c
@@ -0,0 +1,636 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Accelerated Function Unit (AFU)
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Wu Hao <hao.wu@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Joseph Grecco <joe.grecco@intel.com>
11 * Enno Luebbers <enno.luebbers@intel.com>
12 * Tim Whisonant <tim.whisonant@intel.com>
13 * Ananda Ravuri <ananda.ravuri@intel.com>
14 * Henry Mitchel <henry.mitchel@intel.com>
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/uaccess.h>
20#include <linux/fpga-dfl.h>
21
22#include "dfl-afu.h"
23
/**
 * port_enable - enable a port
 * @pdev: port platform device.
 *
 * Enable Port by clear the port soft reset bit, which is set by default.
 * The AFU is unable to respond to any MMIO access while in reset.
 * port_enable function should only be used after port_disable function.
 */
static void port_enable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	/* enable must pair with a prior disable */
	WARN_ON(!pdata->disable_count);

	/* nested disables are refcounted; only the last enable clears reset */
	if (--pdata->disable_count != 0)
		return;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Clear port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);
}
50
51#define RST_POLL_INVL 10 /* us */
52#define RST_POLL_TIMEOUT 1000 /* us */
53
54/**
55 * port_disable - disable a port
56 * @pdev: port platform device.
57 *
58 * Disable Port by setting the port soft reset bit, it puts the port into
59 * reset.
60 */
61static int port_disable(struct platform_device *pdev)
62{
63 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
64 void __iomem *base;
65 u64 v;
66
67 if (pdata->disable_count++ != 0)
68 return 0;
69
70 base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
71
72 /* Set port soft reset */
73 v = readq(base + PORT_HDR_CTRL);
74 v |= PORT_CTRL_SFTRST;
75 writeq(v, base + PORT_HDR_CTRL);
76
77 /*
78 * HW sets ack bit to 1 when all outstanding requests have been drained
79 * on this port and minimum soft reset pulse width has elapsed.
80 * Driver polls port_soft_reset_ack to determine if reset done by HW.
81 */
82 if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
83 RST_POLL_INVL, RST_POLL_TIMEOUT)) {
84 dev_err(&pdev->dev, "timeout, fail to reset device\n");
85 return -ETIMEDOUT;
86 }
87
88 return 0;
89}
90
/*
 * This function resets the FPGA Port and its accelerator (AFU) by function
 * __port_disable and __port_enable (set port soft reset bit and then clear
 * it). Userspace can do Port reset at any time, e.g. during DMA or Partial
 * Reconfiguration. But it should never cause any system level issue, only
 * functional failure (e.g. DMA or PR operation failure) and be recoverable
 * from the failure.
 *
 * Note: the accelerator (AFU) is not accessible when its port is in reset
 * (disabled). Any attempts on MMIO access to AFU while in reset, will
 * result errors reported via port error reporting sub feature (if present).
 *
 * Caller must hold pdata->lock (see port_reset()).
 */
static int __port_reset(struct platform_device *pdev)
{
	int ret;

	/* only re-enable if the disable (reset assert + ack) succeeded */
	ret = port_disable(pdev);
	if (!ret)
		port_enable(pdev);

	return ret;
}
113
114static int port_reset(struct platform_device *pdev)
115{
116 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
117 int ret;
118
119 mutex_lock(&pdata->lock);
120 ret = __port_reset(pdev);
121 mutex_unlock(&pdata->lock);
122
123 return ret;
124}
125
/* Read this port's number from the PORT_HDR capability register. */
static int port_get_id(struct platform_device *pdev)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}
134
/* sysfs: read-only "id" attribute exposing the port number. */
static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int id = port_get_id(to_platform_device(dev));

	return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
static DEVICE_ATTR_RO(id);

static const struct attribute *port_hdr_attrs[] = {
	&dev_attr_id.attr,
	NULL,
};

/* Port-header sub-feature init: reset the port, then publish sysfs files. */
static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "PORT HDR Init.\n");

	/* reset result deliberately ignored — presumably best-effort; confirm */
	port_reset(pdev);

	return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs);
}

/* Port-header sub-feature teardown: remove the sysfs files added above. */
static void port_hdr_uinit(struct platform_device *pdev,
			   struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "PORT HDR UInit.\n");

	sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs);
}
166
167static long
168port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
169 unsigned int cmd, unsigned long arg)
170{
171 long ret;
172
173 switch (cmd) {
174 case DFL_FPGA_PORT_RESET:
175 if (!arg)
176 ret = port_reset(pdev);
177 else
178 ret = -EINVAL;
179 break;
180 default:
181 dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
182 ret = -ENODEV;
183 }
184
185 return ret;
186}
187
/* Callbacks for the PORT_FEATURE_ID_HEADER sub-feature. */
static const struct dfl_feature_ops port_hdr_ops = {
	.init = port_hdr_init,
	.uinit = port_hdr_uinit,
	.ioctl = port_hdr_ioctl,
};
193
/*
 * sysfs: read-only "afu_id" attribute exposing the AFU GUID (128 bits,
 * printed as two 64-bit halves). Fails with -EBUSY while the port is in
 * reset, because the AFU MMIO space cannot be read then.
 */
static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 guidl, guidh;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);

	mutex_lock(&pdata->lock);
	/* disable_count != 0 means the port is held in reset */
	if (pdata->disable_count) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	guidl = readq(base + GUID_L);
	guidh = readq(base + GUID_H);
	mutex_unlock(&pdata->lock);

	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
static DEVICE_ATTR_RO(afu_id);

static const struct attribute *port_afu_attrs[] = {
	&dev_attr_afu_id.attr,
	NULL
};

/*
 * AFU sub-feature init: register the AFU MMIO resource as a user-mappable
 * region (read/write/mmap), then publish the afu_id sysfs attribute.
 */
static int port_afu_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];
	int ret;

	dev_dbg(&pdev->dev, "PORT AFU Init.\n");

	ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				  DFL_PORT_REGION_INDEX_AFU, resource_size(res),
				  res->start, DFL_PORT_REGION_READ |
				  DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP);
	if (ret)
		return ret;

	return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs);
}

/* AFU sub-feature teardown: remove the sysfs files added in init. */
static void port_afu_uinit(struct platform_device *pdev,
			   struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "PORT AFU UInit.\n");

	sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs);
}

/* Callbacks for the PORT_FEATURE_ID_AFU sub-feature. */
static const struct dfl_feature_ops port_afu_ops = {
	.init = port_afu_init,
	.uinit = port_afu_uinit,
};
252
/* Sub-feature drivers matched by feature id; NULL .ops terminates the list. */
static struct dfl_feature_driver port_feature_drvs[] = {
	{
		.id = PORT_FEATURE_ID_HEADER,
		.ops = &port_hdr_ops,
	},
	{
		.id = PORT_FEATURE_ID_AFU,
		.ops = &port_afu_ops,
	},
	{
		.ops = NULL,
	}
};
266
/*
 * open() for the AFU char device: claim exclusive-use of the feature
 * device (dfl_feature_dev_use_begin) and stash the platform device on
 * the file for later ioctl/mmap calls.
 */
static int afu_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata;
	int ret;

	pdata = dev_get_platdata(&fdev->dev);
	if (WARN_ON(!pdata))
		return -ENODEV;

	ret = dfl_feature_dev_use_begin(pdata);
	if (ret)
		return ret;

	dev_dbg(&fdev->dev, "Device File Open\n");
	filp->private_data = fdev;

	return 0;
}
286
/*
 * release() for the AFU char device: reset the port, tear down all DMA
 * regions this user mapped, then drop the exclusive-use claim.
 */
static int afu_release(struct inode *inode, struct file *filp)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;

	dev_dbg(&pdev->dev, "Device File Release\n");

	pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	__port_reset(pdev);
	afu_dma_region_destroy(pdata);
	mutex_unlock(&pdata->lock);

	dfl_feature_dev_use_end(pdata);

	return 0;
}
305
/* DFL_FPGA_CHECK_EXTENSION handler: no extensions supported, always 0. */
static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}
312
/*
 * DFL_FPGA_PORT_GET_INFO handler: report region/umsg counts to userspace.
 * Only the fields up to num_umsgs are read from the user (minsz), allowing
 * older/newer struct layouts as long as argsz covers minsz.
 */
static long
afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_info info;
	struct dfl_afu *afu;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&pdata->lock);
	afu = dfl_fpga_pdata_get_private(pdata);
	info.flags = 0;
	info.num_regions = afu->num_regions;
	info.num_umsgs = afu->num_umsgs;
	mutex_unlock(&pdata->lock);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
340
/*
 * DFL_FPGA_PORT_GET_REGION_INFO handler: look up the MMIO region selected
 * by rinfo.index and return its flags, size and mmap offset.
 */
static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
				      void __user *arg)
{
	struct dfl_fpga_port_region_info rinfo;
	struct dfl_afu_mmio_region region;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);

	if (copy_from_user(&rinfo, arg, minsz))
		return -EFAULT;

	/* padding must be zeroed so it can be reused by future ABI */
	if (rinfo.argsz < minsz || rinfo.padding)
		return -EINVAL;

	ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
	if (ret)
		return ret;

	rinfo.flags = region.flags;
	rinfo.size = region.size;
	rinfo.offset = region.offset;

	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
		return -EFAULT;

	return 0;
}
370
/*
 * DFL_FPGA_PORT_DMA_MAP handler: pin and DMA-map the user buffer, then
 * return the resulting iova. If the iova cannot be copied back the mapping
 * is rolled back so no region leaks.
 */
static long
afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_map map;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);

	if (copy_from_user(&map, arg, minsz))
		return -EFAULT;

	/* no flags are defined yet; reject non-zero to keep them reserved */
	if (map.argsz < minsz || map.flags)
		return -EINVAL;

	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
	if (ret)
		return ret;

	if (copy_to_user(arg, &map, sizeof(map))) {
		afu_dma_unmap_region(pdata, map.iova);
		return -EFAULT;
	}

	dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
		(unsigned long long)map.user_addr,
		(unsigned long long)map.length,
		(unsigned long long)map.iova);

	return 0;
}
402
/* DFL_FPGA_PORT_DMA_UNMAP handler: tear down the mapping selected by iova. */
static long
afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_unmap unmap;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);

	if (copy_from_user(&unmap, arg, minsz))
		return -EFAULT;

	/* no flags are defined yet; reject non-zero to keep them reserved */
	if (unmap.argsz < minsz || unmap.flags)
		return -EINVAL;

	return afu_dma_unmap_region(pdata, unmap.iova);
}
419
/*
 * Top-level ioctl dispatcher for the AFU char device: core commands are
 * handled here; anything unknown is offered to each sub-feature in turn.
 */
static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	pdata = dev_get_platdata(&pdev->dev);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return afu_ioctl_check_extension(pdata, arg);
	case DFL_FPGA_PORT_GET_INFO:
		return afu_ioctl_get_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_GET_REGION_INFO:
		return afu_ioctl_get_region_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_MAP:
		return afu_ioctl_dma_map(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_UNMAP:
		return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
	default:
		/*
		 * Let sub-feature's ioctl function to handle the cmd
		 * Sub-feature's ioctl returns -ENODEV when cmd is not
		 * handled in this sub feature, and returns 0 and other
		 * error code if cmd is handled.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f)
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
	}

	/* no handler claimed the command */
	return -EINVAL;
}
461
/*
 * mmap() for the AFU char device: map a slice of an AFU MMIO region into
 * userspace. The vma offset/size must fall inside a registered region whose
 * flags permit mmap and the requested read/write access. Pages are mapped
 * uncached since this is device MMIO.
 */
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	u64 size = vma->vm_end - vma->vm_start;
	struct dfl_afu_mmio_region region;
	u64 offset;
	int ret;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pdata = dev_get_platdata(&pdev->dev);

	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
	if (ret)
		return ret;

	if (!(region.flags & DFL_PORT_REGION_MMAP))
		return -EINVAL;

	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
		return -EPERM;

	if ((vma->vm_flags & VM_WRITE) &&
	    !(region.flags & DFL_PORT_REGION_WRITE))
		return -EPERM;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* translate file offset into a physical pfn within the region */
	return remap_pfn_range(vma, vma->vm_start,
			(region.phys + (offset - region.offset)) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}
497
/* File operations backing the AFU character device node. */
static const struct file_operations afu_fops = {
	.owner = THIS_MODULE,
	.open = afu_open,
	.release = afu_release,
	.unlocked_ioctl = afu_ioctl,
	.mmap = afu_mmap,
};
505
/*
 * Allocate the per-device dfl_afu state (devm-managed) and initialize the
 * MMIO-region list and DMA-region tree under pdata->lock.
 */
static int afu_dev_init(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_afu *afu;

	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
	if (!afu)
		return -ENOMEM;

	afu->pdata = pdata;

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, afu);
	afu_mmio_region_init(pdata);
	afu_dma_region_init(pdata);
	mutex_unlock(&pdata->lock);

	return 0;
}
525
/*
 * Tear down the per-device dfl_afu state: destroy MMIO and DMA regions and
 * clear the private pointer (the afu struct itself is devm-freed).
 */
static int afu_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_afu *afu;

	mutex_lock(&pdata->lock);
	afu = dfl_fpga_pdata_get_private(pdata);
	afu_mmio_region_destroy(pdata);
	afu_dma_region_destroy(pdata);
	dfl_fpga_pdata_set_private(pdata, NULL);
	mutex_unlock(&pdata->lock);

	return 0;
}
540
541static int port_enable_set(struct platform_device *pdev, bool enable)
542{
543 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
544 int ret = 0;
545
546 mutex_lock(&pdata->lock);
547 if (enable)
548 port_enable(pdev);
549 else
550 ret = port_disable(pdev);
551 mutex_unlock(&pdata->lock);
552
553 return ret;
554}
555
/* Port ops registered with the DFL core so other drivers can control ports. */
static struct dfl_fpga_port_ops afu_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = port_get_id,
	.enable_set = port_enable_set,
};
562
/*
 * Platform probe: allocate device state, bind sub-feature drivers, then
 * register the char-device ops. Each step unwinds the previous on failure.
 */
static int afu_probe(struct platform_device *pdev)
{
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	ret = afu_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
	if (ret) {
		dfl_fpga_dev_feature_uinit(pdev);
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	afu_dev_destroy(pdev);
exit:
	return ret;
}
590
/* Platform remove: undo afu_probe() steps in reverse order. */
static int afu_remove(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "%s\n", __func__);

	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	afu_dev_destroy(pdev);

	return 0;
}
601
/* Matches "dfl-port" platform devices enumerated by the DFL core. */
static struct platform_driver afu_driver = {
	.driver	= {
		.name = DFL_FPGA_FEATURE_DEV_PORT,
	},
	.probe = afu_probe,
	.remove = afu_remove,
};

/* Register port ops first so they exist before any device probes. */
static int __init afu_init(void)
{
	int ret;

	dfl_fpga_port_ops_add(&afu_port_ops);

	ret = platform_driver_register(&afu_driver);
	if (ret)
		dfl_fpga_port_ops_del(&afu_port_ops);

	return ret;
}

static void __exit afu_exit(void)
{
	platform_driver_unregister(&afu_driver);

	dfl_fpga_port_ops_del(&afu_port_ops);
}

module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-port");
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
new file mode 100644
index 000000000000..0804b7a0c298
--- /dev/null
+++ b/drivers/fpga/dfl-afu-region.c
@@ -0,0 +1,166 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Accelerated Function Unit (AFU) MMIO Region Management
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Wu Hao <hao.wu@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 */
11#include "dfl-afu.h"
12
/**
 * afu_mmio_region_init - init function for afu mmio region support
 * @pdata: afu platform device's pdata.
 *
 * Only initializes the empty region list; no locking is taken here —
 * presumably called before the feature device is exposed to users
 * (TODO confirm against the caller in dfl-afu-main.c).
 */
void afu_mmio_region_init(struct dfl_feature_platform_data *pdata)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);

	INIT_LIST_HEAD(&afu->regions);
}
23
/* iterate all mmio regions linked on one afu */
#define for_each_region(region, afu)	\
	list_for_each_entry((region), &(afu)->regions, node)

/*
 * Look up a region by its index; returns NULL if not found.
 * Callers in this file invoke it while holding pdata->lock.
 */
static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
						       u32 region_index)
{
	struct dfl_afu_mmio_region *region;

	for_each_region(region, afu)
		if (region->index == region_index)
			return region;

	return NULL;
}
38
39/**
40 * afu_mmio_region_add - add a mmio region to given feature dev.
41 *
42 * @region_index: region index.
43 * @region_size: region size.
44 * @phys: region's physical address of this region.
45 * @flags: region flags (access permission).
46 *
47 * Return: 0 on success, negative error code otherwise.
48 */
49int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
50 u32 region_index, u64 region_size, u64 phys, u32 flags)
51{
52 struct dfl_afu_mmio_region *region;
53 struct dfl_afu *afu;
54 int ret = 0;
55
56 region = devm_kzalloc(&pdata->dev->dev, sizeof(*region), GFP_KERNEL);
57 if (!region)
58 return -ENOMEM;
59
60 region->index = region_index;
61 region->size = region_size;
62 region->phys = phys;
63 region->flags = flags;
64
65 mutex_lock(&pdata->lock);
66
67 afu = dfl_fpga_pdata_get_private(pdata);
68
69 /* check if @index already exists */
70 if (get_region_by_index(afu, region_index)) {
71 mutex_unlock(&pdata->lock);
72 ret = -EEXIST;
73 goto exit;
74 }
75
76 region_size = PAGE_ALIGN(region_size);
77 region->offset = afu->region_cur_offset;
78 list_add(&region->node, &afu->regions);
79
80 afu->region_cur_offset += region_size;
81 afu->num_regions++;
82 mutex_unlock(&pdata->lock);
83
84 return 0;
85
86exit:
87 devm_kfree(&pdata->dev->dev, region);
88 return ret;
89}
90
/**
 * afu_mmio_region_destroy - destroy all mmio regions under given feature dev.
 * @pdata: afu platform device's pdata.
 *
 * Frees every region node; runs without pdata->lock — presumably only
 * called on device teardown when no other accessors remain (TODO confirm).
 */
void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata)
{
	struct dfl_afu *afu = dfl_fpga_pdata_get_private(pdata);
	struct dfl_afu_mmio_region *tmp, *region;

	list_for_each_entry_safe(region, tmp, &afu->regions, node)
		devm_kfree(&pdata->dev->dev, region);
}
103
104/**
105 * afu_mmio_region_get_by_index - find an afu region by index.
106 * @pdata: afu platform device's pdata.
107 * @region_index: region index.
108 * @pregion: ptr to region for result.
109 *
110 * Return: 0 on success, negative error code otherwise.
111 */
112int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
113 u32 region_index,
114 struct dfl_afu_mmio_region *pregion)
115{
116 struct dfl_afu_mmio_region *region;
117 struct dfl_afu *afu;
118 int ret = 0;
119
120 mutex_lock(&pdata->lock);
121 afu = dfl_fpga_pdata_get_private(pdata);
122 region = get_region_by_index(afu, region_index);
123 if (!region) {
124 ret = -EINVAL;
125 goto exit;
126 }
127 *pregion = *region;
128exit:
129 mutex_unlock(&pdata->lock);
130 return ret;
131}
132
133/**
134 * afu_mmio_region_get_by_offset - find an afu mmio region by offset and size
135 *
136 * @pdata: afu platform device's pdata.
137 * @offset: region offset from start of the device fd.
138 * @size: region size.
139 * @pregion: ptr to region for result.
140 *
141 * Find the region which fully contains the region described by input
142 * parameters (offset and size) from the feature dev's region linked list.
143 *
144 * Return: 0 on success, negative error code otherwise.
145 */
146int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
147 u64 offset, u64 size,
148 struct dfl_afu_mmio_region *pregion)
149{
150 struct dfl_afu_mmio_region *region;
151 struct dfl_afu *afu;
152 int ret = 0;
153
154 mutex_lock(&pdata->lock);
155 afu = dfl_fpga_pdata_get_private(pdata);
156 for_each_region(region, afu)
157 if (region->offset <= offset &&
158 region->offset + region->size >= offset + size) {
159 *pregion = *region;
160 goto exit;
161 }
162 ret = -EINVAL;
163exit:
164 mutex_unlock(&pdata->lock);
165 return ret;
166}
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
new file mode 100644
index 000000000000..0c7630ae3cda
--- /dev/null
+++ b/drivers/fpga/dfl-afu.h
@@ -0,0 +1,100 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Header file for FPGA Accelerated Function Unit (AFU) Driver
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Wu Hao <hao.wu@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Joseph Grecco <joe.grecco@intel.com>
11 * Enno Luebbers <enno.luebbers@intel.com>
12 * Tim Whisonant <tim.whisonant@intel.com>
13 * Ananda Ravuri <ananda.ravuri@intel.com>
14 * Henry Mitchel <henry.mitchel@intel.com>
15 */
16
17#ifndef __DFL_AFU_H
18#define __DFL_AFU_H
19
20#include <linux/mm.h>
21
22#include "dfl.h"
23
/**
 * struct dfl_afu_mmio_region - afu mmio region data structure
 *
 * @index: region index.
 * @flags: region flags (access permission).
 * @size: region size.
 * @offset: region offset from start of the device fd.
 * @phys: region's physical address.
 * @node: node to add to afu feature dev's region list.
 *
 * Regions are laid out back to back at page aligned offsets within the
 * device fd (see afu_mmio_region_add() in dfl-afu-region.c).
 */
struct dfl_afu_mmio_region {
	u32 index;
	u32 flags;
	u64 size;
	u64 offset;
	u64 phys;
	struct list_head node;
};
42
/**
 * struct dfl_afu_dma_region - afu DMA region data structure
 *
 * @user_addr: region userspace virtual address.
 * @length: region length.
 * @iova: region IO virtual address.
 * @pages: ptr to pages of this region.
 * @node: rb tree node.
 * @in_use: flag to indicate if this region is in_use.
 */
struct dfl_afu_dma_region {
	u64 user_addr;
	u64 length;
	u64 iova;
	struct page **pages;
	struct rb_node node;
	bool in_use;
};
61
/**
 * struct dfl_afu - afu device data structure
 *
 * @region_cur_offset: current region offset from start to the device fd.
 * @num_regions: num of mmio regions.
 * @num_umsgs: num of umsgs.
 * @regions: the mmio region linked list of this afu feature device.
 * @dma_regions: root of dma regions rb tree.
 * @pdata: afu platform device's pdata.
 */
struct dfl_afu {
	u64 region_cur_offset;
	int num_regions;
	u8 num_umsgs;
	struct list_head regions;
	struct rb_root dma_regions;

	struct dfl_feature_platform_data *pdata;
};
81
/* mmio region management, implemented in dfl-afu-region.c */
void afu_mmio_region_init(struct dfl_feature_platform_data *pdata);
int afu_mmio_region_add(struct dfl_feature_platform_data *pdata,
			u32 region_index, u64 region_size, u64 phys, u32 flags);
void afu_mmio_region_destroy(struct dfl_feature_platform_data *pdata);
int afu_mmio_region_get_by_index(struct dfl_feature_platform_data *pdata,
				 u32 region_index,
				 struct dfl_afu_mmio_region *pregion);
int afu_mmio_region_get_by_offset(struct dfl_feature_platform_data *pdata,
				  u64 offset, u64 size,
				  struct dfl_afu_mmio_region *pregion);

/* DMA region management, implemented in dfl-afu-dma-region.c */
void afu_dma_region_init(struct dfl_feature_platform_data *pdata);
void afu_dma_region_destroy(struct dfl_feature_platform_data *pdata);
int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
		       u64 user_addr, u64 length, u64 *iova);
int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
struct dfl_afu_dma_region *
afu_dma_region_find(struct dfl_feature_platform_data *pdata,
		    u64 iova, u64 size);
#endif /* __DFL_AFU_H */
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
new file mode 100644
index 000000000000..7cc041def8b3
--- /dev/null
+++ b/drivers/fpga/dfl-fme-br.c
@@ -0,0 +1,114 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * FPGA Bridge Driver for FPGA Management Engine (FME)
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Wu Hao <hao.wu@intel.com>
9 * Joseph Grecco <joe.grecco@intel.com>
10 * Enno Luebbers <enno.luebbers@intel.com>
11 * Tim Whisonant <tim.whisonant@intel.com>
12 * Ananda Ravuri <ananda.ravuri@intel.com>
13 * Henry Mitchel <henry.mitchel@intel.com>
14 */
15
16#include <linux/module.h>
17#include <linux/fpga/fpga-bridge.h>
18
19#include "dfl.h"
20#include "dfl-fme-pr.h"
21
/**
 * struct fme_br_priv - FME bridge private data
 * @pdata: platform data (presumably supplied by the FME PR sub feature
 *         which creates this bridge device — confirm against dfl-fme-pr.c).
 * @port_ops: port ops of the bridged port, resolved lazily on first use.
 * @port_pdev: platform device of the bridged port, resolved lazily.
 */
struct fme_br_priv {
	struct dfl_fme_br_pdata *pdata;
	struct dfl_fpga_port_ops *port_ops;
	struct platform_device *port_pdev;
};
27
28static int fme_bridge_enable_set(struct fpga_bridge *bridge, bool enable)
29{
30 struct fme_br_priv *priv = bridge->priv;
31 struct platform_device *port_pdev;
32 struct dfl_fpga_port_ops *ops;
33
34 if (!priv->port_pdev) {
35 port_pdev = dfl_fpga_cdev_find_port(priv->pdata->cdev,
36 &priv->pdata->port_id,
37 dfl_fpga_check_port_id);
38 if (!port_pdev)
39 return -ENODEV;
40
41 priv->port_pdev = port_pdev;
42 }
43
44 if (priv->port_pdev && !priv->port_ops) {
45 ops = dfl_fpga_port_ops_get(priv->port_pdev);
46 if (!ops || !ops->enable_set)
47 return -ENOENT;
48
49 priv->port_ops = ops;
50 }
51
52 return priv->port_ops->enable_set(priv->port_pdev, enable);
53}
54
/* bridge ops: only gating (enable_set) is supported */
static const struct fpga_bridge_ops fme_bridge_ops = {
	.enable_set = fme_bridge_enable_set,
};
58
59static int fme_br_probe(struct platform_device *pdev)
60{
61 struct device *dev = &pdev->dev;
62 struct fme_br_priv *priv;
63 struct fpga_bridge *br;
64 int ret;
65
66 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
67 if (!priv)
68 return -ENOMEM;
69
70 priv->pdata = dev_get_platdata(dev);
71
72 br = fpga_bridge_create(dev, "DFL FPGA FME Bridge",
73 &fme_bridge_ops, priv);
74 if (!br)
75 return -ENOMEM;
76
77 platform_set_drvdata(pdev, br);
78
79 ret = fpga_bridge_register(br);
80 if (ret)
81 fpga_bridge_free(br);
82
83 return ret;
84}
85
/*
 * fme_br_remove - unregister the bridge, then drop the port device and
 * port ops references cached by fme_bridge_enable_set().  Unregistering
 * first ensures no further enable_set() calls use the references.
 */
static int fme_br_remove(struct platform_device *pdev)
{
	struct fpga_bridge *br = platform_get_drvdata(pdev);
	struct fme_br_priv *priv = br->priv;

	fpga_bridge_unregister(br);

	if (priv->port_pdev)
		put_device(&priv->port_pdev->dev);
	if (priv->port_ops)
		dfl_fpga_port_ops_put(priv->port_ops);

	return 0;
}
100
/* platform driver and module registration for the FME bridge */
static struct platform_driver fme_br_driver = {
	.driver	= {
		.name    = DFL_FPGA_FME_BRIDGE,
	},
	.probe   = fme_br_probe,
	.remove  = fme_br_remove,
};

module_platform_driver(fme_br_driver);

MODULE_DESCRIPTION("FPGA Bridge for DFL FPGA Management Engine");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme-bridge");
diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c
new file mode 100644
index 000000000000..086ad2420ade
--- /dev/null
+++ b/drivers/fpga/dfl-fme-main.c
@@ -0,0 +1,279 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Management Engine (FME)
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Joseph Grecco <joe.grecco@intel.com>
11 * Enno Luebbers <enno.luebbers@intel.com>
12 * Tim Whisonant <tim.whisonant@intel.com>
13 * Ananda Ravuri <ananda.ravuri@intel.com>
14 * Henry Mitchel <henry.mitchel@intel.com>
15 */
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/fpga-dfl.h>
20
21#include "dfl.h"
22#include "dfl-fme.h"
23
24static ssize_t ports_num_show(struct device *dev,
25 struct device_attribute *attr, char *buf)
26{
27 void __iomem *base;
28 u64 v;
29
30 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
31
32 v = readq(base + FME_HDR_CAP);
33
34 return scnprintf(buf, PAGE_SIZE, "%u\n",
35 (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
36}
37static DEVICE_ATTR_RO(ports_num);
38
39/*
40 * Bitstream (static FPGA region) identifier number. It contains the
41 * detailed version and other information of this static FPGA region.
42 */
43static ssize_t bitstream_id_show(struct device *dev,
44 struct device_attribute *attr, char *buf)
45{
46 void __iomem *base;
47 u64 v;
48
49 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
50
51 v = readq(base + FME_HDR_BITSTREAM_ID);
52
53 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
54}
55static DEVICE_ATTR_RO(bitstream_id);
56
57/*
58 * Bitstream (static FPGA region) meta data. It contains the synthesis
59 * date, seed and other information of this static FPGA region.
60 */
61static ssize_t bitstream_metadata_show(struct device *dev,
62 struct device_attribute *attr, char *buf)
63{
64 void __iomem *base;
65 u64 v;
66
67 base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_HEADER);
68
69 v = readq(base + FME_HDR_BITSTREAM_MD);
70
71 return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
72}
73static DEVICE_ATTR_RO(bitstream_metadata);
74
/* sysfs attributes exported by the FME header sub feature */
static const struct attribute *fme_hdr_attrs[] = {
	&dev_attr_ports_num.attr,
	&dev_attr_bitstream_id.attr,
	&dev_attr_bitstream_metadata.attr,
	NULL,
};
81
82static int fme_hdr_init(struct platform_device *pdev,
83 struct dfl_feature *feature)
84{
85 void __iomem *base = feature->ioaddr;
86 int ret;
87
88 dev_dbg(&pdev->dev, "FME HDR Init.\n");
89 dev_dbg(&pdev->dev, "FME cap %llx.\n",
90 (unsigned long long)readq(base + FME_HDR_CAP));
91
92 ret = sysfs_create_files(&pdev->dev.kobj, fme_hdr_attrs);
93 if (ret)
94 return ret;
95
96 return 0;
97}
98
/* uinit callback: remove the sysfs files created by fme_hdr_init() */
static void fme_hdr_uinit(struct platform_device *pdev,
			  struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "FME HDR UInit.\n");
	sysfs_remove_files(&pdev->dev.kobj, fme_hdr_attrs);
}
105
/* private feature ops for the FME header sub feature */
static const struct dfl_feature_ops fme_hdr_ops = {
	.init = fme_hdr_init,
	.uinit = fme_hdr_uinit,
};

/*
 * Sub feature drivers matched during dfl_fpga_dev_feature_init();
 * the array is terminated by an entry with a NULL .ops.
 */
static struct dfl_feature_driver fme_feature_drvs[] = {
	{
		.id = FME_FEATURE_ID_HEADER,
		.ops = &fme_hdr_ops,
	},
	{
		.id = FME_FEATURE_ID_PR_MGMT,
		.ops = &pr_mgmt_ops,
	},
	{
		.ops = NULL,
	},
};
124
/* DFL_FPGA_CHECK_EXTENSION handler: no extensions are supported yet */
static long fme_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}
131
132static int fme_open(struct inode *inode, struct file *filp)
133{
134 struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
135 struct dfl_feature_platform_data *pdata = dev_get_platdata(&fdev->dev);
136 int ret;
137
138 if (WARN_ON(!pdata))
139 return -ENODEV;
140
141 ret = dfl_feature_dev_use_begin(pdata);
142 if (ret)
143 return ret;
144
145 dev_dbg(&fdev->dev, "Device File Open\n");
146 filp->private_data = pdata;
147
148 return 0;
149}
150
/* release callback: drop the in-use mark taken in fme_open() */
static int fme_release(struct inode *inode, struct file *filp)
{
	struct dfl_feature_platform_data *pdata = filp->private_data;
	struct platform_device *pdev = pdata->dev;

	dev_dbg(&pdev->dev, "Device File Release\n");
	dfl_feature_dev_use_end(pdata);

	return 0;
}
161
/*
 * fme_ioctl - ioctl dispatcher of the FME device file.
 *
 * Generic commands are handled here; anything else is offered to each
 * sub feature in turn until one claims it (returns != -ENODEV).
 */
static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct dfl_feature_platform_data *pdata = filp->private_data;
	struct platform_device *pdev = pdata->dev;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return fme_ioctl_check_extension(pdata, arg);
	default:
		/*
		 * Let sub-feature's ioctl function to handle the cmd.
		 * Sub-feature's ioctl returns -ENODEV when cmd is not
		 * handled in this sub feature, and returns 0 or other
		 * error code if cmd is handled.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f) {
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
		}
	}

	/* no sub feature claimed the command */
	return -EINVAL;
}
194
/*
 * fme_dev_init - allocate the FME private data and publish it in pdata.
 *
 * The fme struct is fully initialized before it is made visible via
 * dfl_fpga_pdata_set_private() under pdata->lock.
 */
static int fme_dev_init(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_fme *fme;

	fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
	if (!fme)
		return -ENOMEM;

	fme->pdata = pdata;

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, fme);
	mutex_unlock(&pdata->lock);

	return 0;
}
212
213static void fme_dev_destroy(struct platform_device *pdev)
214{
215 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
216 struct dfl_fme *fme;
217
218 mutex_lock(&pdata->lock);
219 fme = dfl_fpga_pdata_get_private(pdata);
220 dfl_fpga_pdata_set_private(pdata, NULL);
221 mutex_unlock(&pdata->lock);
222}
223
/* device file operations of the FME feature device */
static const struct file_operations fme_fops = {
	.owner		= THIS_MODULE,
	.open		= fme_open,
	.release	= fme_release,
	.unlocked_ioctl	= fme_ioctl,
};
230
/*
 * fme_probe - probe the FME feature platform device: private data first,
 * then sub features, then device file ops.  Error labels unwind in
 * reverse order of initialization.
 */
static int fme_probe(struct platform_device *pdev)
{
	int ret;

	ret = fme_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
	if (ret)
		goto feature_uinit;

	return 0;

feature_uinit:
	dfl_fpga_dev_feature_uinit(pdev);
dev_destroy:
	fme_dev_destroy(pdev);
exit:
	return ret;
}
256
/* fme_remove - teardown in reverse order of fme_probe() */
static int fme_remove(struct platform_device *pdev)
{
	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	fme_dev_destroy(pdev);

	return 0;
}

/* platform driver and module registration for the FME feature device */
static struct platform_driver fme_driver = {
	.driver	= {
		.name    = DFL_FPGA_FEATURE_DEV_FME,
	},
	.probe   = fme_probe,
	.remove  = fme_remove,
};

module_platform_driver(fme_driver);

MODULE_DESCRIPTION("FPGA Management Engine driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme");
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
new file mode 100644
index 000000000000..b5ef405b6d88
--- /dev/null
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -0,0 +1,349 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * FPGA Manager Driver for FPGA Management Engine (FME)
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Joseph Grecco <joe.grecco@intel.com>
12 * Enno Luebbers <enno.luebbers@intel.com>
13 * Tim Whisonant <tim.whisonant@intel.com>
14 * Ananda Ravuri <ananda.ravuri@intel.com>
15 * Christopher Rauer <christopher.rauer@intel.com>
16 * Henry Mitchel <henry.mitchel@intel.com>
17 */
18
19#include <linux/bitfield.h>
20#include <linux/module.h>
21#include <linux/iopoll.h>
22#include <linux/io-64-nonatomic-lo-hi.h>
23#include <linux/fpga/fpga-mgr.h>
24
25#include "dfl-fme-pr.h"
26
27/* FME Partial Reconfiguration Sub Feature Register Set */
28#define FME_PR_DFH 0x0
29#define FME_PR_CTRL 0x8
30#define FME_PR_STS 0x10
31#define FME_PR_DATA 0x18
32#define FME_PR_ERR 0x20
33#define FME_PR_INTFC_ID_H 0xA8
34#define FME_PR_INTFC_ID_L 0xB0
35
36/* FME PR Control Register Bitfield */
37#define FME_PR_CTRL_PR_RST BIT_ULL(0) /* Reset PR engine */
38#define FME_PR_CTRL_PR_RSTACK BIT_ULL(4) /* Ack for PR engine reset */
39#define FME_PR_CTRL_PR_RGN_ID GENMASK_ULL(9, 7) /* PR Region ID */
40#define FME_PR_CTRL_PR_START BIT_ULL(12) /* Start to request PR service */
41#define FME_PR_CTRL_PR_COMPLETE BIT_ULL(13) /* PR data push completion */
42
43/* FME PR Status Register Bitfield */
44/* Number of available entries in HW queue inside the PR engine. */
45#define FME_PR_STS_PR_CREDIT GENMASK_ULL(8, 0)
46#define FME_PR_STS_PR_STS BIT_ULL(16) /* PR operation status */
47#define FME_PR_STS_PR_STS_IDLE 0
48#define FME_PR_STS_PR_CTRLR_STS GENMASK_ULL(22, 20) /* Controller status */
49#define FME_PR_STS_PR_HOST_STS GENMASK_ULL(27, 24) /* PR host status */
50
51/* FME PR Data Register Bitfield */
52/* PR data from the raw-binary file. */
53#define FME_PR_DATA_PR_DATA_RAW GENMASK_ULL(32, 0)
54
55/* FME PR Error Register */
56/* PR Operation errors detected. */
57#define FME_PR_ERR_OPERATION_ERR BIT_ULL(0)
58/* CRC error detected. */
59#define FME_PR_ERR_CRC_ERR BIT_ULL(1)
60/* Incompatible PR bitstream detected. */
61#define FME_PR_ERR_INCOMPATIBLE_BS BIT_ULL(2)
62/* PR data push protocol violated. */
63#define FME_PR_ERR_PROTOCOL_ERR BIT_ULL(3)
64/* PR data fifo overflow error detected */
65#define FME_PR_ERR_FIFO_OVERFLOW BIT_ULL(4)
66
67#define PR_WAIT_TIMEOUT 8000000
68#define PR_HOST_STATUS_IDLE 0
69
/**
 * struct fme_mgr_priv - FME manager private data
 * @ioaddr: mapped base of the PR sub feature register set.
 * @pr_error: last error value latched from the FME_PR_ERR register.
 */
struct fme_mgr_priv {
	void __iomem *ioaddr;
	u64 pr_error;
};
74
75static u64 pr_error_to_mgr_status(u64 err)
76{
77 u64 status = 0;
78
79 if (err & FME_PR_ERR_OPERATION_ERR)
80 status |= FPGA_MGR_STATUS_OPERATION_ERR;
81 if (err & FME_PR_ERR_CRC_ERR)
82 status |= FPGA_MGR_STATUS_CRC_ERR;
83 if (err & FME_PR_ERR_INCOMPATIBLE_BS)
84 status |= FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR;
85 if (err & FME_PR_ERR_PROTOCOL_ERR)
86 status |= FPGA_MGR_STATUS_IP_PROTOCOL_ERR;
87 if (err & FME_PR_ERR_FIFO_OVERFLOW)
88 status |= FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR;
89
90 return status;
91}
92
/*
 * Read and clear the PR error register if the status register flags an
 * error.  The error value is written back to FME_PR_ERR — presumably
 * write-1-to-clear semantics (TODO confirm against the HW spec).
 * Returns the latched error bits, or 0 when no error is pending.
 */
static u64 fme_mgr_pr_error_handle(void __iomem *fme_pr)
{
	u64 pr_status, pr_error;

	pr_status = readq(fme_pr + FME_PR_STS);
	if (!(pr_status & FME_PR_STS_PR_STS))
		return 0;

	pr_error = readq(fme_pr + FME_PR_ERR);
	writeq(pr_error, fme_pr + FME_PR_ERR);

	return pr_error;
}
106
/*
 * fme_mgr_write_init - prepare the PR engine for a partial reconfiguration.
 *
 * Only partial reconfiguration is supported.  Sequence: reset the PR
 * engine and wait for the reset ack, wait until PR status reports idle,
 * latch and clear any stale error from a previous operation, then
 * program the target PR region (port) ID from @info.
 */
static int fme_mgr_write_init(struct fpga_manager *mgr,
			      struct fpga_image_info *info,
			      const char *buf, size_t count)
{
	struct device *dev = &mgr->dev;
	struct fme_mgr_priv *priv = mgr->priv;
	void __iomem *fme_pr = priv->ioaddr;
	u64 pr_ctrl, pr_status;

	if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
		dev_err(dev, "only supports partial reconfiguration.\n");
		return -EINVAL;
	}

	dev_dbg(dev, "resetting PR before initiated PR\n");

	pr_ctrl = readq(fme_pr + FME_PR_CTRL);
	pr_ctrl |= FME_PR_CTRL_PR_RST;
	writeq(pr_ctrl, fme_pr + FME_PR_CTRL);

	if (readq_poll_timeout(fme_pr + FME_PR_CTRL, pr_ctrl,
			       pr_ctrl & FME_PR_CTRL_PR_RSTACK, 1,
			       PR_WAIT_TIMEOUT)) {
		dev_err(dev, "PR Reset ACK timeout\n");
		return -ETIMEDOUT;
	}

	pr_ctrl = readq(fme_pr + FME_PR_CTRL);
	pr_ctrl &= ~FME_PR_CTRL_PR_RST;
	writeq(pr_ctrl, fme_pr + FME_PR_CTRL);

	dev_dbg(dev,
		"waiting for PR resource in HW to be initialized and ready\n");

	if (readq_poll_timeout(fme_pr + FME_PR_STS, pr_status,
			       (pr_status & FME_PR_STS_PR_STS) ==
			       FME_PR_STS_PR_STS_IDLE, 1, PR_WAIT_TIMEOUT)) {
		dev_err(dev, "PR Status timeout\n");
		priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "check and clear previous PR error\n");
	priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
	if (priv->pr_error)
		dev_dbg(dev, "previous PR error detected %llx\n",
			(unsigned long long)priv->pr_error);

	dev_dbg(dev, "set PR port ID\n");

	pr_ctrl = readq(fme_pr + FME_PR_CTRL);
	pr_ctrl &= ~FME_PR_CTRL_PR_RGN_ID;
	pr_ctrl |= FIELD_PREP(FME_PR_CTRL_PR_RGN_ID, info->region_id);
	writeq(pr_ctrl, fme_pr + FME_PR_CTRL);

	return 0;
}
164
165static int fme_mgr_write(struct fpga_manager *mgr,
166 const char *buf, size_t count)
167{
168 struct device *dev = &mgr->dev;
169 struct fme_mgr_priv *priv = mgr->priv;
170 void __iomem *fme_pr = priv->ioaddr;
171 u64 pr_ctrl, pr_status, pr_data;
172 int delay = 0, pr_credit, i = 0;
173
174 dev_dbg(dev, "start request\n");
175
176 pr_ctrl = readq(fme_pr + FME_PR_CTRL);
177 pr_ctrl |= FME_PR_CTRL_PR_START;
178 writeq(pr_ctrl, fme_pr + FME_PR_CTRL);
179
180 dev_dbg(dev, "pushing data from bitstream to HW\n");
181
182 /*
183 * driver can push data to PR hardware using PR_DATA register once HW
184 * has enough pr_credit (> 1), pr_credit reduces one for every 32bit
185 * pr data write to PR_DATA register. If pr_credit <= 1, driver needs
186 * to wait for enough pr_credit from hardware by polling.
187 */
188 pr_status = readq(fme_pr + FME_PR_STS);
189 pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status);
190
191 while (count > 0) {
192 while (pr_credit <= 1) {
193 if (delay++ > PR_WAIT_TIMEOUT) {
194 dev_err(dev, "PR_CREDIT timeout\n");
195 return -ETIMEDOUT;
196 }
197 udelay(1);
198
199 pr_status = readq(fme_pr + FME_PR_STS);
200 pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status);
201 }
202
203 if (count < 4) {
204 dev_err(dev, "Invaild PR bitstream size\n");
205 return -EINVAL;
206 }
207
208 pr_data = 0;
209 pr_data |= FIELD_PREP(FME_PR_DATA_PR_DATA_RAW,
210 *(((u32 *)buf) + i));
211 writeq(pr_data, fme_pr + FME_PR_DATA);
212 count -= 4;
213 pr_credit--;
214 i++;
215 }
216
217 return 0;
218}
219
/*
 * fme_mgr_write_complete - finish the PR operation: signal completion,
 * wait for the hardware to clear PR_START, then check for errors latched
 * by the engine.
 */
static int fme_mgr_write_complete(struct fpga_manager *mgr,
				  struct fpga_image_info *info)
{
	struct device *dev = &mgr->dev;
	struct fme_mgr_priv *priv = mgr->priv;
	void __iomem *fme_pr = priv->ioaddr;
	u64 pr_ctrl;

	pr_ctrl = readq(fme_pr + FME_PR_CTRL);
	pr_ctrl |= FME_PR_CTRL_PR_COMPLETE;
	writeq(pr_ctrl, fme_pr + FME_PR_CTRL);

	dev_dbg(dev, "green bitstream push complete\n");
	dev_dbg(dev, "waiting for HW to release PR resource\n");

	if (readq_poll_timeout(fme_pr + FME_PR_CTRL, pr_ctrl,
			       !(pr_ctrl & FME_PR_CTRL_PR_START), 1,
			       PR_WAIT_TIMEOUT)) {
		dev_err(dev, "PR Completion ACK timeout.\n");
		return -ETIMEDOUT;
	}

	dev_dbg(dev, "PR operation complete, checking status\n");
	priv->pr_error = fme_mgr_pr_error_handle(fme_pr);
	if (priv->pr_error) {
		dev_dbg(dev, "PR error detected %llx\n",
			(unsigned long long)priv->pr_error);
		return -EIO;
	}

	dev_dbg(dev, "PR done successfully\n");

	return 0;
}
254
/* the PR engine exposes no overall state, so always report unknown */
static enum fpga_mgr_states fme_mgr_state(struct fpga_manager *mgr)
{
	return FPGA_MGR_STATE_UNKNOWN;
}

/* report the last latched PR error translated to manager status bits */
static u64 fme_mgr_status(struct fpga_manager *mgr)
{
	struct fme_mgr_priv *priv = mgr->priv;

	return pr_error_to_mgr_status(priv->pr_error);
}

static const struct fpga_manager_ops fme_mgr_ops = {
	.write_init = fme_mgr_write_init,
	.write = fme_mgr_write,
	.write_complete = fme_mgr_write_complete,
	.state = fme_mgr_state,
	.status = fme_mgr_status,
};
274
/* read the 128-bit PR interface ID used as the manager's compat_id */
static void fme_mgr_get_compat_id(void __iomem *fme_pr,
				  struct fpga_compat_id *id)
{
	id->id_l = readq(fme_pr + FME_PR_INTFC_ID_L);
	id->id_h = readq(fme_pr + FME_PR_INTFC_ID_H);
}
281
282static int fme_mgr_probe(struct platform_device *pdev)
283{
284 struct dfl_fme_mgr_pdata *pdata = dev_get_platdata(&pdev->dev);
285 struct fpga_compat_id *compat_id;
286 struct device *dev = &pdev->dev;
287 struct fme_mgr_priv *priv;
288 struct fpga_manager *mgr;
289 struct resource *res;
290 int ret;
291
292 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
293 if (!priv)
294 return -ENOMEM;
295
296 if (pdata->ioaddr)
297 priv->ioaddr = pdata->ioaddr;
298
299 if (!priv->ioaddr) {
300 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
301 priv->ioaddr = devm_ioremap_resource(dev, res);
302 if (IS_ERR(priv->ioaddr))
303 return PTR_ERR(priv->ioaddr);
304 }
305
306 compat_id = devm_kzalloc(dev, sizeof(*compat_id), GFP_KERNEL);
307 if (!compat_id)
308 return -ENOMEM;
309
310 fme_mgr_get_compat_id(priv->ioaddr, compat_id);
311
312 mgr = fpga_mgr_create(dev, "DFL FME FPGA Manager",
313 &fme_mgr_ops, priv);
314 if (!mgr)
315 return -ENOMEM;
316
317 mgr->compat_id = compat_id;
318 platform_set_drvdata(pdev, mgr);
319
320 ret = fpga_mgr_register(mgr);
321 if (ret)
322 fpga_mgr_free(mgr);
323
324 return ret;
325}
326
/* unregister the manager; devm frees priv and the mapped resource */
static int fme_mgr_remove(struct platform_device *pdev)
{
	struct fpga_manager *mgr = platform_get_drvdata(pdev);

	fpga_mgr_unregister(mgr);

	return 0;
}

/* platform driver and module registration for the FME FPGA manager */
static struct platform_driver fme_mgr_driver = {
	.driver	= {
		.name    = DFL_FPGA_FME_MGR,
	},
	.probe   = fme_mgr_probe,
	.remove  = fme_mgr_remove,
};

module_platform_driver(fme_mgr_driver);

MODULE_DESCRIPTION("FPGA Manager for DFL FPGA Management Engine");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme-mgr");
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
new file mode 100644
index 000000000000..fc9fd2d0482f
--- /dev/null
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -0,0 +1,479 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Management Engine (FME) Partial Reconfiguration
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Joseph Grecco <joe.grecco@intel.com>
12 * Enno Luebbers <enno.luebbers@intel.com>
13 * Tim Whisonant <tim.whisonant@intel.com>
14 * Ananda Ravuri <ananda.ravuri@intel.com>
15 * Christopher Rauer <christopher.rauer@intel.com>
16 * Henry Mitchel <henry.mitchel@intel.com>
17 */
18
19#include <linux/types.h>
20#include <linux/device.h>
21#include <linux/vmalloc.h>
22#include <linux/uaccess.h>
23#include <linux/fpga/fpga-mgr.h>
24#include <linux/fpga/fpga-bridge.h>
25#include <linux/fpga/fpga-region.h>
26#include <linux/fpga-dfl.h>
27
28#include "dfl.h"
29#include "dfl-fme.h"
30#include "dfl-fme-pr.h"
31
32static struct dfl_fme_region *
33dfl_fme_region_find_by_port_id(struct dfl_fme *fme, int port_id)
34{
35 struct dfl_fme_region *fme_region;
36
37 list_for_each_entry(fme_region, &fme->region_list, node)
38 if (fme_region->port_id == port_id)
39 return fme_region;
40
41 return NULL;
42}
43
/* class_find match callback: the fpga_region we want is a child of @data */
static int dfl_fme_region_match(struct device *dev, const void *data)
{
	return dev->parent == data;
}
48
49static struct fpga_region *dfl_fme_region_find(struct dfl_fme *fme, int port_id)
50{
51 struct dfl_fme_region *fme_region;
52 struct fpga_region *region;
53
54 fme_region = dfl_fme_region_find_by_port_id(fme, port_id);
55 if (!fme_region)
56 return NULL;
57
58 region = fpga_region_class_find(NULL, &fme_region->region->dev,
59 dfl_fme_region_match);
60 if (!region)
61 return NULL;
62
63 return region;
64}
65
66static int fme_pr(struct platform_device *pdev, unsigned long arg)
67{
68 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
69 void __user *argp = (void __user *)arg;
70 struct dfl_fpga_fme_port_pr port_pr;
71 struct fpga_image_info *info;
72 struct fpga_region *region;
73 void __iomem *fme_hdr;
74 struct dfl_fme *fme;
75 unsigned long minsz;
76 void *buf = NULL;
77 int ret = 0;
78 u64 v;
79
80 minsz = offsetofend(struct dfl_fpga_fme_port_pr, buffer_address);
81
82 if (copy_from_user(&port_pr, argp, minsz))
83 return -EFAULT;
84
85 if (port_pr.argsz < minsz || port_pr.flags)
86 return -EINVAL;
87
88 if (!IS_ALIGNED(port_pr.buffer_size, 4))
89 return -EINVAL;
90
91 /* get fme header region */
92 fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
93 FME_FEATURE_ID_HEADER);
94
95 /* check port id */
96 v = readq(fme_hdr + FME_HDR_CAP);
97 if (port_pr.port_id >= FIELD_GET(FME_CAP_NUM_PORTS, v)) {
98 dev_dbg(&pdev->dev, "port number more than maximum\n");
99 return -EINVAL;
100 }
101
102 if (!access_ok(VERIFY_READ,
103 (void __user *)(unsigned long)port_pr.buffer_address,
104 port_pr.buffer_size))
105 return -EFAULT;
106
107 buf = vmalloc(port_pr.buffer_size);
108 if (!buf)
109 return -ENOMEM;
110
111 if (copy_from_user(buf,
112 (void __user *)(unsigned long)port_pr.buffer_address,
113 port_pr.buffer_size)) {
114 ret = -EFAULT;
115 goto free_exit;
116 }
117
118 /* prepare fpga_image_info for PR */
119 info = fpga_image_info_alloc(&pdev->dev);
120 if (!info) {
121 ret = -ENOMEM;
122 goto free_exit;
123 }
124
125 info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
126
127 mutex_lock(&pdata->lock);
128 fme = dfl_fpga_pdata_get_private(pdata);
129 /* fme device has been unregistered. */
130 if (!fme) {
131 ret = -EINVAL;
132 goto unlock_exit;
133 }
134
135 region = dfl_fme_region_find(fme, port_pr.port_id);
136 if (!region) {
137 ret = -EINVAL;
138 goto unlock_exit;
139 }
140
141 fpga_image_info_free(region->info);
142
143 info->buf = buf;
144 info->count = port_pr.buffer_size;
145 info->region_id = port_pr.port_id;
146 region->info = info;
147
148 ret = fpga_region_program_fpga(region);
149
150 /*
151 * it allows userspace to reset the PR region's logic by disabling and
152 * reenabling the bridge to clear things out between accleration runs.
153 * so no need to hold the bridges after partial reconfiguration.
154 */
155 if (region->get_bridges)
156 fpga_bridges_put(&region->bridge_list);
157
158 put_device(&region->dev);
159unlock_exit:
160 mutex_unlock(&pdata->lock);
161free_exit:
162 vfree(buf);
163 if (copy_to_user((void __user *)arg, &port_pr, minsz))
164 return -EFAULT;
165
166 return ret;
167}
168
/**
 * dfl_fme_create_mgr - create fpga mgr platform device as child device
 *
 * @pdata: fme platform_device's pdata
 * @feature: FME PR management private feature, provides the mapped PR
 *           register block handed to the manager via its platform data.
 *
 * Return: mgr platform device if successful, and error code otherwise.
 */
static struct platform_device *
dfl_fme_create_mgr(struct dfl_feature_platform_data *pdata,
		   struct dfl_feature *feature)
{
	struct platform_device *mgr, *fme = pdata->dev;
	struct dfl_fme_mgr_pdata mgr_pdata;
	int ret = -ENOMEM;

	/* the manager is useless without the mapped PR registers */
	if (!feature->ioaddr)
		return ERR_PTR(-ENODEV);

	mgr_pdata.ioaddr = feature->ioaddr;

	/*
	 * Each FME has only one fpga-mgr, so allocate platform device using
	 * the same FME platform device id.
	 */
	mgr = platform_device_alloc(DFL_FPGA_FME_MGR, fme->id);
	if (!mgr)
		return ERR_PTR(ret);

	mgr->dev.parent = &fme->dev;

	/* platform core copies mgr_pdata, so a stack variable is fine */
	ret = platform_device_add_data(mgr, &mgr_pdata, sizeof(mgr_pdata));
	if (ret)
		goto create_mgr_err;

	ret = platform_device_add(mgr);
	if (ret)
		goto create_mgr_err;

	return mgr;

create_mgr_err:
	platform_device_put(mgr);
	return ERR_PTR(ret);
}
213
/**
 * dfl_fme_destroy_mgr - destroy fpga mgr platform device
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_mgr(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);

	/* unregister drops the reference taken by platform_device_alloc() */
	platform_device_unregister(priv->mgr);
}
224
225/**
226 * dfl_fme_create_bridge - create fme fpga bridge platform device as child
227 *
228 * @pdata: fme platform device's pdata
229 * @port_id: port id for the bridge to be created.
230 *
231 * Return: bridge platform device if successful, and error code otherwise.
232 */
233static struct dfl_fme_bridge *
234dfl_fme_create_bridge(struct dfl_feature_platform_data *pdata, int port_id)
235{
236 struct device *dev = &pdata->dev->dev;
237 struct dfl_fme_br_pdata br_pdata;
238 struct dfl_fme_bridge *fme_br;
239 int ret = -ENOMEM;
240
241 fme_br = devm_kzalloc(dev, sizeof(*fme_br), GFP_KERNEL);
242 if (!fme_br)
243 return ERR_PTR(ret);
244
245 br_pdata.cdev = pdata->dfl_cdev;
246 br_pdata.port_id = port_id;
247
248 fme_br->br = platform_device_alloc(DFL_FPGA_FME_BRIDGE,
249 PLATFORM_DEVID_AUTO);
250 if (!fme_br->br)
251 return ERR_PTR(ret);
252
253 fme_br->br->dev.parent = dev;
254
255 ret = platform_device_add_data(fme_br->br, &br_pdata, sizeof(br_pdata));
256 if (ret)
257 goto create_br_err;
258
259 ret = platform_device_add(fme_br->br);
260 if (ret)
261 goto create_br_err;
262
263 return fme_br;
264
265create_br_err:
266 platform_device_put(fme_br->br);
267 return ERR_PTR(ret);
268}
269
/**
 * dfl_fme_destroy_bridge - destroy fpga bridge platform device
 * @fme_br: fme bridge to destroy
 */
static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
{
	/* fme_br itself is devm-allocated and freed on FME detach */
	platform_device_unregister(fme_br->br);
}
278
/**
 * dfl_fme_destroy_bridges - destroy all fpga bridge platform devices
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_bridge *fbridge, *tmp;

	/* _safe variant: each entry is unlinked while walking the list */
	list_for_each_entry_safe(fbridge, tmp, &priv->bridge_list, node) {
		list_del(&fbridge->node);
		dfl_fme_destroy_bridge(fbridge);
	}
}
293
294/**
295 * dfl_fme_create_region - create fpga region platform device as child
296 *
297 * @pdata: fme platform device's pdata
298 * @mgr: mgr platform device needed for region
299 * @br: br platform device needed for region
300 * @port_id: port id
301 *
302 * Return: fme region if successful, and error code otherwise.
303 */
304static struct dfl_fme_region *
305dfl_fme_create_region(struct dfl_feature_platform_data *pdata,
306 struct platform_device *mgr,
307 struct platform_device *br, int port_id)
308{
309 struct dfl_fme_region_pdata region_pdata;
310 struct device *dev = &pdata->dev->dev;
311 struct dfl_fme_region *fme_region;
312 int ret = -ENOMEM;
313
314 fme_region = devm_kzalloc(dev, sizeof(*fme_region), GFP_KERNEL);
315 if (!fme_region)
316 return ERR_PTR(ret);
317
318 region_pdata.mgr = mgr;
319 region_pdata.br = br;
320
321 /*
322 * Each FPGA device may have more than one port, so allocate platform
323 * device using the same port platform device id.
324 */
325 fme_region->region = platform_device_alloc(DFL_FPGA_FME_REGION, br->id);
326 if (!fme_region->region)
327 return ERR_PTR(ret);
328
329 fme_region->region->dev.parent = dev;
330
331 ret = platform_device_add_data(fme_region->region, &region_pdata,
332 sizeof(region_pdata));
333 if (ret)
334 goto create_region_err;
335
336 ret = platform_device_add(fme_region->region);
337 if (ret)
338 goto create_region_err;
339
340 fme_region->port_id = port_id;
341
342 return fme_region;
343
344create_region_err:
345 platform_device_put(fme_region->region);
346 return ERR_PTR(ret);
347}
348
/**
 * dfl_fme_destroy_region - destroy fme region
 * @fme_region: fme region to destroy
 */
static void dfl_fme_destroy_region(struct dfl_fme_region *fme_region)
{
	/* unregister drops the reference taken by platform_device_alloc() */
	platform_device_unregister(fme_region->region);
}
357
/**
 * dfl_fme_destroy_regions - destroy all fme regions
 * @pdata: fme platform device's pdata
 */
static void dfl_fme_destroy_regions(struct dfl_feature_platform_data *pdata)
{
	struct dfl_fme *priv = dfl_fpga_pdata_get_private(pdata);
	struct dfl_fme_region *fme_region, *tmp;

	/* _safe variant: each entry is unlinked while walking the list */
	list_for_each_entry_safe(fme_region, tmp, &priv->region_list, node) {
		list_del(&fme_region->node);
		dfl_fme_destroy_region(fme_region);
	}
}
372
373static int pr_mgmt_init(struct platform_device *pdev,
374 struct dfl_feature *feature)
375{
376 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
377 struct dfl_fme_region *fme_region;
378 struct dfl_fme_bridge *fme_br;
379 struct platform_device *mgr;
380 struct dfl_fme *priv;
381 void __iomem *fme_hdr;
382 int ret = -ENODEV, i = 0;
383 u64 fme_cap, port_offset;
384
385 fme_hdr = dfl_get_feature_ioaddr_by_id(&pdev->dev,
386 FME_FEATURE_ID_HEADER);
387
388 mutex_lock(&pdata->lock);
389 priv = dfl_fpga_pdata_get_private(pdata);
390
391 /* Initialize the region and bridge sub device list */
392 INIT_LIST_HEAD(&priv->region_list);
393 INIT_LIST_HEAD(&priv->bridge_list);
394
395 /* Create fpga mgr platform device */
396 mgr = dfl_fme_create_mgr(pdata, feature);
397 if (IS_ERR(mgr)) {
398 dev_err(&pdev->dev, "fail to create fpga mgr pdev\n");
399 goto unlock;
400 }
401
402 priv->mgr = mgr;
403
404 /* Read capability register to check number of regions and bridges */
405 fme_cap = readq(fme_hdr + FME_HDR_CAP);
406 for (; i < FIELD_GET(FME_CAP_NUM_PORTS, fme_cap); i++) {
407 port_offset = readq(fme_hdr + FME_HDR_PORT_OFST(i));
408 if (!(port_offset & FME_PORT_OFST_IMP))
409 continue;
410
411 /* Create bridge for each port */
412 fme_br = dfl_fme_create_bridge(pdata, i);
413 if (IS_ERR(fme_br)) {
414 ret = PTR_ERR(fme_br);
415 goto destroy_region;
416 }
417
418 list_add(&fme_br->node, &priv->bridge_list);
419
420 /* Create region for each port */
421 fme_region = dfl_fme_create_region(pdata, mgr,
422 fme_br->br, i);
423 if (!fme_region) {
424 ret = PTR_ERR(fme_region);
425 goto destroy_region;
426 }
427
428 list_add(&fme_region->node, &priv->region_list);
429 }
430 mutex_unlock(&pdata->lock);
431
432 return 0;
433
434destroy_region:
435 dfl_fme_destroy_regions(pdata);
436 dfl_fme_destroy_bridges(pdata);
437 dfl_fme_destroy_mgr(pdata);
438unlock:
439 mutex_unlock(&pdata->lock);
440 return ret;
441}
442
443static void pr_mgmt_uinit(struct platform_device *pdev,
444 struct dfl_feature *feature)
445{
446 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
447 struct dfl_fme *priv;
448
449 mutex_lock(&pdata->lock);
450 priv = dfl_fpga_pdata_get_private(pdata);
451
452 dfl_fme_destroy_regions(pdata);
453 dfl_fme_destroy_bridges(pdata);
454 dfl_fme_destroy_mgr(pdata);
455 mutex_unlock(&pdata->lock);
456}
457
458static long fme_pr_ioctl(struct platform_device *pdev,
459 struct dfl_feature *feature,
460 unsigned int cmd, unsigned long arg)
461{
462 long ret;
463
464 switch (cmd) {
465 case DFL_FPGA_FME_PORT_PR:
466 ret = fme_pr(pdev, arg);
467 break;
468 default:
469 ret = -ENODEV;
470 }
471
472 return ret;
473}
474
/* PR management sub feature ops, referenced by the FME main driver */
const struct dfl_feature_ops pr_mgmt_ops = {
	.init = pr_mgmt_init,
	.uinit = pr_mgmt_uinit,
	.ioctl = fme_pr_ioctl,
};
diff --git a/drivers/fpga/dfl-fme-pr.h b/drivers/fpga/dfl-fme-pr.h
new file mode 100644
index 000000000000..096a699089d3
--- /dev/null
+++ b/drivers/fpga/dfl-fme-pr.h
@@ -0,0 +1,84 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Header file for FPGA Management Engine (FME) Partial Reconfiguration Driver
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Joseph Grecco <joe.grecco@intel.com>
12 * Enno Luebbers <enno.luebbers@intel.com>
13 * Tim Whisonant <tim.whisonant@intel.com>
14 * Ananda Ravuri <ananda.ravuri@intel.com>
15 * Henry Mitchel <henry.mitchel@intel.com>
16 */
17
18#ifndef __DFL_FME_PR_H
19#define __DFL_FME_PR_H
20
21#include <linux/platform_device.h>
22
/**
 * struct dfl_fme_region - FME fpga region data structure
 *
 * @region: platform device of the FPGA region.
 * @node: used to link fme_region to the FME's region_list.
 * @port_id: indicate which port this region is connected to.
 */
struct dfl_fme_region {
	struct platform_device *region;
	struct list_head node;
	int port_id;
};
35
/**
 * struct dfl_fme_region_pdata - platform data for FME region platform device.
 *
 * @mgr: platform device of the FPGA manager.
 * @br: platform device of the FPGA bridge.
 * @region_id: region id (same as port_id).
 *             NOTE(review): not read by the region driver in this file set;
 *             confirm whether any consumer depends on it being set.
 */
struct dfl_fme_region_pdata {
	struct platform_device *mgr;
	struct platform_device *br;
	int region_id;
};
48
/**
 * struct dfl_fme_bridge - FME fpga bridge data structure
 *
 * @br: platform device of the FPGA bridge ("dfl-fme-bridge").
 * @node: used to link fme_bridge to the FME's bridge_list.
 */
struct dfl_fme_bridge {
	struct platform_device *br;
	struct list_head node;
};
59
/**
 * struct dfl_fme_br_pdata - platform data for FME bridge platform device.
 *
 * @cdev: container device.
 * @port_id: port id.
 */
struct dfl_fme_br_pdata {
	struct dfl_fpga_cdev *cdev;
	int port_id;
};
70
/**
 * struct dfl_fme_mgr_pdata - platform data for FME manager platform device.
 *
 * @ioaddr: mapped io address for FME manager platform device.
 */
struct dfl_fme_mgr_pdata {
	void __iomem *ioaddr;	/* PR register block, mapped by the parent FME */
};
79
80#define DFL_FPGA_FME_MGR "dfl-fme-mgr"
81#define DFL_FPGA_FME_BRIDGE "dfl-fme-bridge"
82#define DFL_FPGA_FME_REGION "dfl-fme-region"
83
84#endif /* __DFL_FME_PR_H */
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
new file mode 100644
index 000000000000..0b7e19c27c6d
--- /dev/null
+++ b/drivers/fpga/dfl-fme-region.c
@@ -0,0 +1,89 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * FPGA Region Driver for FPGA Management Engine (FME)
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Wu Hao <hao.wu@intel.com>
9 * Joseph Grecco <joe.grecco@intel.com>
10 * Enno Luebbers <enno.luebbers@intel.com>
11 * Tim Whisonant <tim.whisonant@intel.com>
12 * Ananda Ravuri <ananda.ravuri@intel.com>
13 * Henry Mitchel <henry.mitchel@intel.com>
14 */
15
16#include <linux/module.h>
17#include <linux/fpga/fpga-region.h>
18
19#include "dfl-fme-pr.h"
20
21static int fme_region_get_bridges(struct fpga_region *region)
22{
23 struct dfl_fme_region_pdata *pdata = region->priv;
24 struct device *dev = &pdata->br->dev;
25
26 return fpga_bridge_get_to_list(dev, region->info, &region->bridge_list);
27}
28
29static int fme_region_probe(struct platform_device *pdev)
30{
31 struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev);
32 struct device *dev = &pdev->dev;
33 struct fpga_region *region;
34 struct fpga_manager *mgr;
35 int ret;
36
37 mgr = fpga_mgr_get(&pdata->mgr->dev);
38 if (IS_ERR(mgr))
39 return -EPROBE_DEFER;
40
41 region = fpga_region_create(dev, mgr, fme_region_get_bridges);
42 if (!region) {
43 ret = -ENOMEM;
44 goto eprobe_mgr_put;
45 }
46
47 region->priv = pdata;
48 region->compat_id = mgr->compat_id;
49 platform_set_drvdata(pdev, region);
50
51 ret = fpga_region_register(region);
52 if (ret)
53 goto region_free;
54
55 dev_dbg(dev, "DFL FME FPGA Region probed\n");
56
57 return 0;
58
59region_free:
60 fpga_region_free(region);
61eprobe_mgr_put:
62 fpga_mgr_put(mgr);
63 return ret;
64}
65
66static int fme_region_remove(struct platform_device *pdev)
67{
68 struct fpga_region *region = dev_get_drvdata(&pdev->dev);
69
70 fpga_region_unregister(region);
71 fpga_mgr_put(region->mgr);
72
73 return 0;
74}
75
/* platform driver matched by name against the "dfl-fme-region" children */
static struct platform_driver fme_region_driver = {
	.driver	= {
		.name    = DFL_FPGA_FME_REGION,
	},
	.probe   = fme_region_probe,
	.remove  = fme_region_remove,
};
83
84module_platform_driver(fme_region_driver);
85
86MODULE_DESCRIPTION("FPGA Region for DFL FPGA Management Engine");
87MODULE_AUTHOR("Intel Corporation");
88MODULE_LICENSE("GPL v2");
89MODULE_ALIAS("platform:dfl-fme-region");
diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h
new file mode 100644
index 000000000000..5394a216c5c0
--- /dev/null
+++ b/drivers/fpga/dfl-fme.h
@@ -0,0 +1,38 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Header file for FPGA Management Engine (FME) Driver
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Joseph Grecco <joe.grecco@intel.com>
12 * Enno Luebbers <enno.luebbers@intel.com>
13 * Tim Whisonant <tim.whisonant@intel.com>
14 * Ananda Ravuri <ananda.ravuri@intel.com>
15 * Henry Mitchel <henry.mitchel@intel.com>
16 */
17
18#ifndef __DFL_FME_H
19#define __DFL_FME_H
20
/**
 * struct dfl_fme - dfl fme private data
 *
 * @mgr: FME's FPGA manager platform device.
 * @region_list: linked list of FME's FPGA regions; manipulated under the
 *               feature platform data's lock (see pr_mgmt_init/uinit).
 * @bridge_list: linked list of FME's FPGA bridges; same locking.
 * @pdata: fme platform device's pdata.
 */
struct dfl_fme {
	struct platform_device *mgr;
	struct list_head region_list;
	struct list_head bridge_list;
	struct dfl_feature_platform_data *pdata;
};
35
36extern const struct dfl_feature_ops pr_mgmt_ops;
37
38#endif /* __DFL_FME_H */
diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c
new file mode 100644
index 000000000000..66b5720582bb
--- /dev/null
+++ b/drivers/fpga/dfl-pci.c
@@ -0,0 +1,243 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Device Feature List (DFL) PCIe device
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Zhang Yi <Yi.Z.Zhang@intel.com>
9 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
10 * Joseph Grecco <joe.grecco@intel.com>
11 * Enno Luebbers <enno.luebbers@intel.com>
12 * Tim Whisonant <tim.whisonant@intel.com>
13 * Ananda Ravuri <ananda.ravuri@intel.com>
14 * Henry Mitchel <henry.mitchel@intel.com>
15 */
16
17#include <linux/pci.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/stddef.h>
22#include <linux/errno.h>
23#include <linux/aer.h>
24
25#include "dfl.h"
26
27#define DRV_VERSION "0.8"
28#define DRV_NAME "dfl-pci"
29
/* drvdata attached to the PCIe device; holds the enumerated DFL container */
struct cci_drvdata {
	struct dfl_fpga_cdev *cdev;	/* container device */
};
33
34static void __iomem *cci_pci_ioremap_bar(struct pci_dev *pcidev, int bar)
35{
36 if (pcim_iomap_regions(pcidev, BIT(bar), DRV_NAME))
37 return NULL;
38
39 return pcim_iomap_table(pcidev)[bar];
40}
41
42/* PCI Device ID */
43#define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD
44#define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0
45#define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4
46/* VF Device */
47#define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF
48#define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1
49#define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5
50
51static struct pci_device_id cci_pcie_id_tbl[] = {
52 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),},
53 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_5_X),},
54 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_6_X),},
55 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),},
56 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),},
57 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),},
58 {0,}
59};
60MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl);
61
62static int cci_init_drvdata(struct pci_dev *pcidev)
63{
64 struct cci_drvdata *drvdata;
65
66 drvdata = devm_kzalloc(&pcidev->dev, sizeof(*drvdata), GFP_KERNEL);
67 if (!drvdata)
68 return -ENOMEM;
69
70 pci_set_drvdata(pcidev, drvdata);
71
72 return 0;
73}
74
/* unregister all feature devices enumerated under this PCIe device */
static void cci_remove_feature_devs(struct pci_dev *pcidev)
{
	struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);

	/* remove all children feature devices */
	dfl_fpga_feature_devs_remove(drvdata->cdev);
}
82
83/* enumerate feature devices under pci device */
84static int cci_enumerate_feature_devs(struct pci_dev *pcidev)
85{
86 struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
87 struct dfl_fpga_enum_info *info;
88 struct dfl_fpga_cdev *cdev;
89 resource_size_t start, len;
90 int port_num, bar, i, ret = 0;
91 void __iomem *base;
92 u32 offset;
93 u64 v;
94
95 /* allocate enumeration info via pci_dev */
96 info = dfl_fpga_enum_info_alloc(&pcidev->dev);
97 if (!info)
98 return -ENOMEM;
99
100 /* start to find Device Feature List from Bar 0 */
101 base = cci_pci_ioremap_bar(pcidev, 0);
102 if (!base) {
103 ret = -ENOMEM;
104 goto enum_info_free_exit;
105 }
106
107 /*
108 * PF device has FME and Ports/AFUs, and VF device only has one
109 * Port/AFU. Check them and add related "Device Feature List" info
110 * for the next step enumeration.
111 */
112 if (dfl_feature_is_fme(base)) {
113 start = pci_resource_start(pcidev, 0);
114 len = pci_resource_len(pcidev, 0);
115
116 dfl_fpga_enum_info_add_dfl(info, start, len, base);
117
118 /*
119 * find more Device Feature Lists (e.g. Ports) per information
120 * indicated by FME module.
121 */
122 v = readq(base + FME_HDR_CAP);
123 port_num = FIELD_GET(FME_CAP_NUM_PORTS, v);
124
125 WARN_ON(port_num > MAX_DFL_FPGA_PORT_NUM);
126
127 for (i = 0; i < port_num; i++) {
128 v = readq(base + FME_HDR_PORT_OFST(i));
129
130 /* skip ports which are not implemented. */
131 if (!(v & FME_PORT_OFST_IMP))
132 continue;
133
134 /*
135 * add Port's Device Feature List information for next
136 * step enumeration.
137 */
138 bar = FIELD_GET(FME_PORT_OFST_BAR_ID, v);
139 offset = FIELD_GET(FME_PORT_OFST_DFH_OFST, v);
140 base = cci_pci_ioremap_bar(pcidev, bar);
141 if (!base)
142 continue;
143
144 start = pci_resource_start(pcidev, bar) + offset;
145 len = pci_resource_len(pcidev, bar) - offset;
146
147 dfl_fpga_enum_info_add_dfl(info, start, len,
148 base + offset);
149 }
150 } else if (dfl_feature_is_port(base)) {
151 start = pci_resource_start(pcidev, 0);
152 len = pci_resource_len(pcidev, 0);
153
154 dfl_fpga_enum_info_add_dfl(info, start, len, base);
155 } else {
156 ret = -ENODEV;
157 goto enum_info_free_exit;
158 }
159
160 /* start enumeration with prepared enumeration information */
161 cdev = dfl_fpga_feature_devs_enumerate(info);
162 if (IS_ERR(cdev)) {
163 dev_err(&pcidev->dev, "Enumeration failure\n");
164 ret = PTR_ERR(cdev);
165 goto enum_info_free_exit;
166 }
167
168 drvdata->cdev = cdev;
169
170enum_info_free_exit:
171 dfl_fpga_enum_info_free(info);
172
173 return ret;
174}
175
/* bind to the DFL PCIe device: enable it, set up DMA, enumerate features */
static
int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid)
{
	int ret;

	/* managed enable: disabled automatically when the driver detaches */
	ret = pcim_enable_device(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev, "Failed to enable device %d.\n", ret);
		return ret;
	}

	/* AER is best-effort; probe continues even if it is unavailable */
	ret = pci_enable_pcie_error_reporting(pcidev);
	if (ret && ret != -EINVAL)
		dev_info(&pcidev->dev, "PCIE AER unavailable %d.\n", ret);

	pci_set_master(pcidev);

	/* prefer 64-bit DMA masks, fall back to 32-bit before giving up */
	if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret)
			goto disable_error_report_exit;
	} else if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret)
			goto disable_error_report_exit;
	} else {
		ret = -EIO;
		dev_err(&pcidev->dev, "No suitable DMA support available.\n");
		goto disable_error_report_exit;
	}

	ret = cci_init_drvdata(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "Fail to init drvdata %d.\n", ret);
		goto disable_error_report_exit;
	}

	/* walk the Device Feature Lists and create child feature devices */
	ret = cci_enumerate_feature_devs(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "enumeration failure %d.\n", ret);
		goto disable_error_report_exit;
	}

	return ret;

disable_error_report_exit:
	pci_disable_pcie_error_reporting(pcidev);
	return ret;
}
225
static void cci_pci_remove(struct pci_dev *pcidev)
{
	/* remove children before turning off error reporting */
	cci_remove_feature_devs(pcidev);
	pci_disable_pcie_error_reporting(pcidev);
}
231
/* PCIe driver bound to the Intel DFL device ids above */
static struct pci_driver cci_pci_driver = {
	.name = DRV_NAME,
	.id_table = cci_pcie_id_tbl,
	.probe = cci_pci_probe,
	.remove = cci_pci_remove,
};
238
239module_pci_driver(cci_pci_driver);
240
241MODULE_DESCRIPTION("FPGA DFL PCIe Device Driver");
242MODULE_AUTHOR("Intel Corporation");
243MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
new file mode 100644
index 000000000000..a9b521bccb06
--- /dev/null
+++ b/drivers/fpga/dfl.c
@@ -0,0 +1,1044 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Driver for FPGA Device Feature List (DFL) Support
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Zhang Yi <yi.z.zhang@intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
12 */
13#include <linux/module.h>
14
15#include "dfl.h"
16
/* serializes id allocation/removal on the per-type idrs in dfl_devs[] */
static DEFINE_MUTEX(dfl_id_mutex);

/*
 * when adding a new feature dev support in DFL framework, it's required to
 * add a new item in enum dfl_id_type and provide related information in below
 * dfl_devs table which is indexed by dfl_id_type, e.g. name string used for
 * platform device creation (define name strings in dfl.h, as they could be
 * reused by platform device drivers).
 *
 * if the new feature dev needs chardev support, then it's required to add
 * a new item in dfl_chardevs table and configure dfl_devs[i].devt_type as
 * index to dfl_chardevs table. If no chardev support just set devt_type
 * as one invalid index (DFL_FPGA_DEVT_MAX).
 */
enum dfl_id_type {
	FME_ID,		/* fme id allocation and mapping */
	PORT_ID,	/* port id allocation and mapping */
	DFL_ID_MAX,
};

enum dfl_fpga_devt_type {
	DFL_FPGA_DEVT_FME,
	DFL_FPGA_DEVT_PORT,
	DFL_FPGA_DEVT_MAX,	/* also doubles as the "no chardev" sentinel */
};
42
/**
 * dfl_dev_info - dfl feature device information.
 * @name: name string of the feature platform device.
 * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
 * @id: idr id of the feature dev.
 * @devt_type: index to dfl_chrdevs[].
 */
struct dfl_dev_info {
	const char *name;
	u32 dfh_id;
	struct idr id;
	enum dfl_fpga_devt_type devt_type;
};

/* it is indexed by dfl_id_type */
static struct dfl_dev_info dfl_devs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
	 .devt_type = DFL_FPGA_DEVT_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
	 .devt_type = DFL_FPGA_DEVT_PORT},
};

/**
 * dfl_chardev_info - chardev information of dfl feature device
 * @name: name string of the char device.
 * @devt: devt of the char device.
 */
struct dfl_chardev_info {
	const char *name;
	dev_t devt;
};

/* indexed by enum dfl_fpga_devt_type; devt is filled by dfl_chardev_init() */
static struct dfl_chardev_info dfl_chrdevs[] = {
	{.name = DFL_FPGA_FEATURE_DEV_FME},
	{.name = DFL_FPGA_FEATURE_DEV_PORT},
};
80
81static void dfl_ids_init(void)
82{
83 int i;
84
85 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
86 idr_init(&dfl_devs[i].id);
87}
88
89static void dfl_ids_destroy(void)
90{
91 int i;
92
93 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
94 idr_destroy(&dfl_devs[i].id);
95}
96
/*
 * Allocate a device id for a feature dev of the given type.
 * Returns the new id (>= 0) or a negative errno from idr_alloc().
 */
static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
{
	int id;

	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
	mutex_unlock(&dfl_id_mutex);

	return id;
}
108
/* return a device id previously obtained from dfl_id_alloc() */
static void dfl_id_free(enum dfl_id_type type, int id)
{
	WARN_ON(type >= DFL_ID_MAX);
	mutex_lock(&dfl_id_mutex);
	idr_remove(&dfl_devs[type].id, id);
	mutex_unlock(&dfl_id_mutex);
}
116
117static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
118{
119 int i;
120
121 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
122 if (!strcmp(dfl_devs[i].name, pdev->name))
123 return i;
124
125 return DFL_ID_MAX;
126}
127
128static enum dfl_id_type dfh_id_to_type(u32 id)
129{
130 int i;
131
132 for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
133 if (dfl_devs[i].dfh_id == id)
134 return i;
135
136 return DFL_ID_MAX;
137}
138
/*
 * introduce a global port_ops list, it allows port drivers to register ops
 * in such list, then other feature devices (e.g. FME), could use the port
 * functions even related port platform device is hidden. Below is one example,
 * in virtualization case of PCIe-based FPGA DFL device, when SRIOV is
 * enabled, port (and its AFU) is turned into VF and port platform device
 * is hidden from system but it's still required to access port to finish FPGA
 * reconfiguration function in FME.
 */

static DEFINE_MUTEX(dfl_port_ops_mutex);
static LIST_HEAD(dfl_port_ops_list);
151
152/**
153 * dfl_fpga_port_ops_get - get matched port ops from the global list
154 * @pdev: platform device to match with associated port ops.
155 * Return: matched port ops on success, NULL otherwise.
156 *
157 * Please note that must dfl_fpga_port_ops_put after use the port_ops.
158 */
159struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
160{
161 struct dfl_fpga_port_ops *ops = NULL;
162
163 mutex_lock(&dfl_port_ops_mutex);
164 if (list_empty(&dfl_port_ops_list))
165 goto done;
166
167 list_for_each_entry(ops, &dfl_port_ops_list, node) {
168 /* match port_ops using the name of platform device */
169 if (!strcmp(pdev->name, ops->name)) {
170 if (!try_module_get(ops->owner))
171 ops = NULL;
172 goto done;
173 }
174 }
175
176 ops = NULL;
177done:
178 mutex_unlock(&dfl_port_ops_mutex);
179 return ops;
180}
181EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
182
183/**
184 * dfl_fpga_port_ops_put - put port ops
185 * @ops: port ops.
186 */
187void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
188{
189 if (ops && ops->owner)
190 module_put(ops->owner);
191}
192EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
193
/**
 * dfl_fpga_port_ops_add - add port_ops to global list
 * @ops: port ops to add.
 *
 * Called by port platform drivers to publish their ops; the entry stays on
 * the list until dfl_fpga_port_ops_del().
 */
void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_add_tail(&ops->node, &dfl_port_ops_list);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
205
/**
 * dfl_fpga_port_ops_del - remove port_ops from global list
 * @ops: port ops to del.
 */
void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
{
	mutex_lock(&dfl_port_ops_mutex);
	list_del(&ops->node);
	mutex_unlock(&dfl_port_ops_mutex);
}
EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
217
218/**
219 * dfl_fpga_check_port_id - check the port id
220 * @pdev: port platform device.
221 * @pport_id: port id to compare.
222 *
223 * Return: 1 if port device matches with given port id, otherwise 0.
224 */
225int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
226{
227 struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev);
228 int port_id;
229
230 if (!port_ops || !port_ops->get_id)
231 return 0;
232
233 port_id = port_ops->get_id(pdev);
234 dfl_fpga_port_ops_put(port_ops);
235
236 return port_id == *(int *)pport_id;
237}
238EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
239
240/**
241 * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
242 * @pdev: feature device.
243 */
244void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
245{
246 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
247 struct dfl_feature *feature;
248
249 dfl_fpga_dev_for_each_feature(pdata, feature)
250 if (feature->ops) {
251 feature->ops->uinit(pdev, feature);
252 feature->ops = NULL;
253 }
254}
255EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
256
257static int dfl_feature_instance_init(struct platform_device *pdev,
258 struct dfl_feature_platform_data *pdata,
259 struct dfl_feature *feature,
260 struct dfl_feature_driver *drv)
261{
262 int ret;
263
264 ret = drv->ops->init(pdev, feature);
265 if (ret)
266 return ret;
267
268 feature->ops = drv->ops;
269
270 return ret;
271}
272
273/**
274 * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
275 * @pdev: feature device.
276 * @feature_drvs: drvs for sub features.
277 *
278 * This function will match sub features with given feature drvs list and
279 * use matched drv to init related sub feature.
280 *
281 * Return: 0 on success, negative error code otherwise.
282 */
283int dfl_fpga_dev_feature_init(struct platform_device *pdev,
284 struct dfl_feature_driver *feature_drvs)
285{
286 struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
287 struct dfl_feature_driver *drv = feature_drvs;
288 struct dfl_feature *feature;
289 int ret;
290
291 while (drv->ops) {
292 dfl_fpga_dev_for_each_feature(pdata, feature) {
293 /* match feature and drv using id */
294 if (feature->id == drv->id) {
295 ret = dfl_feature_instance_init(pdev, pdata,
296 feature, drv);
297 if (ret)
298 goto exit;
299 }
300 }
301 drv++;
302 }
303
304 return 0;
305exit:
306 dfl_fpga_dev_feature_uinit(pdev);
307 return ret;
308}
309EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
310
311static void dfl_chardev_uinit(void)
312{
313 int i;
314
315 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
316 if (MAJOR(dfl_chrdevs[i].devt)) {
317 unregister_chrdev_region(dfl_chrdevs[i].devt,
318 MINORMASK);
319 dfl_chrdevs[i].devt = MKDEV(0, 0);
320 }
321}
322
323static int dfl_chardev_init(void)
324{
325 int i, ret;
326
327 for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
328 ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, MINORMASK,
329 dfl_chrdevs[i].name);
330 if (ret)
331 goto exit;
332 }
333
334 return 0;
335
336exit:
337 dfl_chardev_uinit();
338 return ret;
339}
340
341static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
342{
343 if (type >= DFL_FPGA_DEVT_MAX)
344 return 0;
345
346 return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
347}
348
/**
 * dfl_fpga_dev_ops_register - register cdev ops for feature dev
 *
 * @pdev: feature dev.
 * @fops: file operations for feature dev's cdev.
 * @owner: owning module/driver.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int dfl_fpga_dev_ops_register(struct platform_device *pdev,
			      const struct file_operations *fops,
			      struct module *owner)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_init(&pdata->cdev, fops);
	pdata->cdev.owner = owner;

	/*
	 * set parent to the feature device so that its refcount is
	 * decreased after the last refcount of cdev is gone, that
	 * makes sure the feature device is valid during device
	 * file's life-cycle.
	 */
	pdata->cdev.kobj.parent = &pdev->dev.kobj;

	/* single minor per feature dev; devt was assigned at enumeration */
	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
378
/**
 * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
 * @pdev: feature dev.
 */
void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	cdev_del(&pdata->cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
390
/**
 * struct build_feature_devs_info - info collected during feature dev build.
 *
 * @dev: device to enumerate.
 * @cdev: the container device for all feature devices.
 * @feature_dev: current feature device.
 * @ioaddr: header register region address of feature device in enumeration.
 * @sub_features: a sub features linked list for feature device in enumeration.
 * @feature_num: number of sub features for feature device in enumeration.
 */
struct build_feature_devs_info {
	struct device *dev;
	struct dfl_fpga_cdev *cdev;
	struct platform_device *feature_dev;
	void __iomem *ioaddr;
	struct list_head sub_features;
	int feature_num;
};

/**
 * struct dfl_feature_info - sub feature info collected during feature dev build
 *
 * @fid: id of this sub feature.
 * @mmio_res: mmio resource of this sub feature.
 * @ioaddr: mapped base address of mmio resource.
 * @node: node in sub_features linked list.
 */
struct dfl_feature_info {
	u64 fid;
	struct resource mmio_res;
	void __iomem *ioaddr;
	struct list_head node;
};
424
/*
 * Link a registered port platform device into the container's port list and
 * take a reference on it; the reference is dropped in
 * dfl_fpga_feature_devs_remove().
 */
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
				       struct platform_device *port)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);

	mutex_lock(&cdev->lock);
	list_add(&pdata->node, &cdev->port_dev_list);
	get_device(&pdata->dev->dev);
	mutex_unlock(&cdev->lock);
}
435
/*
 * register current feature device, it is called when we need to switch to
 * another feature parsing or we have parsed all features on given device
 * feature list.
 */
static int build_info_commit_dev(struct build_feature_devs_info *binfo)
{
	struct platform_device *fdev = binfo->feature_dev;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature_info *finfo, *p;
	int ret, index = 0;

	/* nothing collected yet - nothing to commit */
	if (!fdev)
		return 0;

	/*
	 * we do not need to care for the memory which is associated with
	 * the platform device. After calling platform_device_unregister(),
	 * it will be automatically freed by device's release() callback,
	 * platform_device_release().
	 */
	/* pdata is sized to hold one struct dfl_feature per sub feature */
	pdata = kzalloc(dfl_feature_platform_data_size(binfo->feature_num),
			GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->dev = fdev;
	pdata->num = binfo->feature_num;
	pdata->dfl_cdev = binfo->cdev;
	mutex_init(&pdata->lock);

	/*
	 * the count should be initialized to 0 to make sure
	 *__fpga_port_enable() following __fpga_port_disable()
	 * works properly for port device.
	 * and it should always be 0 for fme device.
	 */
	WARN_ON(pdata->disable_count);

	fdev->dev.platform_data = pdata;

	/* each sub feature has one MMIO resource */
	fdev->num_resources = binfo->feature_num;
	fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
				 GFP_KERNEL);
	if (!fdev->resource)
		return -ENOMEM;

	/* fill features and resource information for feature dev */
	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
		struct dfl_feature *feature = &pdata->features[index];

		/* save resource information for each feature */
		feature->id = finfo->fid;
		feature->resource_index = index;
		feature->ioaddr = finfo->ioaddr;
		fdev->resource[index++] = finfo->mmio_res;

		/* info has been copied into pdata/resources - drop it */
		list_del(&finfo->node);
		kfree(finfo);
	}

	ret = platform_device_add(binfo->feature_dev);
	if (!ret) {
		/* the container keeps a reference on each committed dev */
		if (feature_dev_id_type(binfo->feature_dev) == PORT_ID)
			dfl_fpga_cdev_add_port_dev(binfo->cdev,
						   binfo->feature_dev);
		else
			binfo->cdev->fme_dev =
					get_device(&binfo->feature_dev->dev);
		/*
		 * reset it to avoid build_info_free() freeing their resource.
		 *
		 * The resource of successfully registered feature devices
		 * will be freed by platform_device_unregister(). See the
		 * comments in build_info_create_dev().
		 */
		binfo->feature_dev = NULL;
	}

	return ret;
}
518
/*
 * Start collecting a new feature device of the given type: commit whatever
 * device is currently being built, then allocate a fresh platform device
 * plus a device id and devt for it.  Returns 0 on success, -errno otherwise.
 */
static int
build_info_create_dev(struct build_feature_devs_info *binfo,
		      enum dfl_id_type type, void __iomem *ioaddr)
{
	struct platform_device *fdev;
	int ret;

	if (type >= DFL_ID_MAX)
		return -EINVAL;

	/* we will create a new device, commit current device first */
	ret = build_info_commit_dev(binfo);
	if (ret)
		return ret;

	/*
	 * we use -ENODEV as the initialization indicator which indicates
	 * whether the id need to be reclaimed
	 */
	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
	if (!fdev)
		return -ENOMEM;

	binfo->feature_dev = fdev;
	binfo->feature_num = 0;
	binfo->ioaddr = ioaddr;
	INIT_LIST_HEAD(&binfo->sub_features);

	/* on failure, binfo keeps fdev so build_info_free() can put it */
	fdev->id = dfl_id_alloc(type, &fdev->dev);
	if (fdev->id < 0)
		return fdev->id;

	fdev->dev.parent = &binfo->cdev->region->dev;
	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);

	return 0;
}
556
/*
 * Release the build info and any partially built (uncommitted) feature
 * device: reclaim its id, free collected sub feature infos and drop the
 * platform device reference.  Committed devices are not touched (their
 * feature_dev pointer was reset to NULL in build_info_commit_dev()).
 */
static void build_info_free(struct build_feature_devs_info *binfo)
{
	struct dfl_feature_info *finfo, *p;

	/*
	 * it is a valid id, free it. See comments in
	 * build_info_create_dev()
	 */
	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
			    binfo->feature_dev->id);

		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
			list_del(&finfo->node);
			kfree(finfo);
		}
	}

	/* platform_device_put(NULL) is a no-op, safe when nothing pending */
	platform_device_put(binfo->feature_dev);

	devm_kfree(binfo->dev, binfo);
}
579
580static inline u32 feature_size(void __iomem *start)
581{
582 u64 v = readq(start + DFH);
583 u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
584 /* workaround for private features with invalid size, use 4K instead */
585 return ofst ? ofst : 4096;
586}
587
588static u64 feature_id(void __iomem *start)
589{
590 u64 v = readq(start + DFH);
591 u16 id = FIELD_GET(DFH_ID, v);
592 u8 type = FIELD_GET(DFH_TYPE, v);
593
594 if (type == DFH_TYPE_FIU)
595 return FEATURE_ID_FIU_HEADER;
596 else if (type == DFH_TYPE_PRIVATE)
597 return id;
598 else if (type == DFH_TYPE_AFU)
599 return FEATURE_ID_AFU;
600
601 WARN_ON(1);
602 return 0;
603}
604
/*
 * when create sub feature instances, for private features, it doesn't need
 * to provide resource size and feature id as they could be read from DFH
 * register. For afu sub feature, its register region only contains user
 * defined registers, so never trust any information from it, just use the
 * resource size information provided by its parent FIU.
 */
static int
create_feature_instance(struct build_feature_devs_info *binfo,
			struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
			resource_size_t size, u64 fid)
{
	struct dfl_feature_info *finfo;

	/* read feature size and id if inputs are invalid */
	size = size ? size : feature_size(dfl->ioaddr + ofst);
	fid = fid ? fid : feature_id(dfl->ioaddr + ofst);

	/* the feature region must fit inside the remainder of this DFL */
	if (dfl->len - ofst < size)
		return -EINVAL;

	finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
	if (!finfo)
		return -ENOMEM;

	/* record both the physical range and its mapped address */
	finfo->fid = fid;
	finfo->mmio_res.start = dfl->start + ofst;
	finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
	finfo->mmio_res.flags = IORESOURCE_MEM;
	finfo->ioaddr = dfl->ioaddr + ofst;

	/* ownership of finfo passes to binfo->sub_features */
	list_add_tail(&finfo->node, &binfo->sub_features);
	binfo->feature_num++;

	return 0;
}
641
642static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
643 struct dfl_fpga_enum_dfl *dfl,
644 resource_size_t ofst)
645{
646 u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
647 u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
648
649 WARN_ON(!size);
650
651 return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
652}
653
654static int parse_feature_afu(struct build_feature_devs_info *binfo,
655 struct dfl_fpga_enum_dfl *dfl,
656 resource_size_t ofst)
657{
658 if (!binfo->feature_dev) {
659 dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
660 return -EINVAL;
661 }
662
663 switch (feature_dev_id_type(binfo->feature_dev)) {
664 case PORT_ID:
665 return parse_feature_port_afu(binfo, dfl, ofst);
666 default:
667 dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
668 binfo->feature_dev->name);
669 }
670
671 return 0;
672}
673
/*
 * Parse a FIU header: start a new feature platform device of the matching
 * type, record the FIU header itself as its first sub feature, then chase
 * the FIU's NEXT_AFU pointer to its child AFU if one exists.
 */
static int parse_feature_fiu(struct build_feature_devs_info *binfo,
			     struct dfl_fpga_enum_dfl *dfl,
			     resource_size_t ofst)
{
	u32 id, offset;
	u64 v;
	int ret = 0;

	v = readq(dfl->ioaddr + ofst + DFH);
	id = FIELD_GET(DFH_ID, v);

	/* create platform device for dfl feature dev */
	ret = build_info_create_dev(binfo, dfh_id_to_type(id),
				    dfl->ioaddr + ofst);
	if (ret)
		return ret;

	/* size/id 0: let create_feature_instance() read them from the DFH */
	ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
	if (ret)
		return ret;
	/*
	 * find and parse FIU's child AFU via its NEXT_AFU register.
	 * please note that only Port has valid NEXT_AFU pointer per spec.
	 */
	v = readq(dfl->ioaddr + ofst + NEXT_AFU);

	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
	if (offset)
		return parse_feature_afu(binfo, dfl, ofst + offset);

	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);

	return ret;
}
708
/*
 * Parse a private feature header; it must appear after a FIU on the list,
 * i.e. while a feature device is being built.
 */
static int parse_feature_private(struct build_feature_devs_info *binfo,
				 struct dfl_fpga_enum_dfl *dfl,
				 resource_size_t ofst)
{
	if (!binfo->feature_dev) {
		dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
			(unsigned long long)feature_id(dfl->ioaddr + ofst));
		return -EINVAL;
	}

	/* size/id 0: both are read from the feature's own DFH register */
	return create_feature_instance(binfo, dfl, ofst, 0, 0);
}
721
722/**
723 * parse_feature - parse a feature on given device feature list
724 *
725 * @binfo: build feature devices information.
726 * @dfl: device feature list to parse
727 * @ofst: offset to feature header on this device feature list
728 */
729static int parse_feature(struct build_feature_devs_info *binfo,
730 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
731{
732 u64 v;
733 u32 type;
734
735 v = readq(dfl->ioaddr + ofst + DFH);
736 type = FIELD_GET(DFH_TYPE, v);
737
738 switch (type) {
739 case DFH_TYPE_AFU:
740 return parse_feature_afu(binfo, dfl, ofst);
741 case DFH_TYPE_PRIVATE:
742 return parse_feature_private(binfo, dfl, ofst);
743 case DFH_TYPE_FIU:
744 return parse_feature_fiu(binfo, dfl, ofst);
745 default:
746 dev_info(binfo->dev,
747 "Feature Type %x is not supported.\n", type);
748 }
749
750 return 0;
751}
752
/*
 * Walk one device feature list header by header and build feature devices
 * from it; commits the last pending device when the end of the list is
 * reached.  Returns 0 on success, -errno otherwise.
 */
static int parse_feature_list(struct build_feature_devs_info *binfo,
			      struct dfl_fpga_enum_dfl *dfl)
{
	void __iomem *start = dfl->ioaddr;
	void __iomem *end = dfl->ioaddr + dfl->len;
	int ret = 0;
	u32 ofst = 0;
	u64 v;

	/* walk through the device feature list via DFH's next DFH pointer. */
	for (; start < end; start += ofst) {
		/* each header must fit entirely inside the region */
		if (end - start < DFH_SIZE) {
			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
			return -EINVAL;
		}

		/* pass the offset relative to the list base, not a pointer */
		ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
		if (ret)
			return ret;

		v = readq(start + DFH);
		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);

		/* stop parsing if EOL(End of List) is set or offset is 0 */
		if ((v & DFH_EOL) || !ofst)
			break;
	}

	/* commit current feature device when reach the end of list */
	return build_info_commit_dev(binfo);
}
784
785struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
786{
787 struct dfl_fpga_enum_info *info;
788
789 get_device(dev);
790
791 info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
792 if (!info) {
793 put_device(dev);
794 return NULL;
795 }
796
797 info->dev = dev;
798 INIT_LIST_HEAD(&info->dfls);
799
800 return info;
801}
802EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
803
/*
 * Free an enumeration info: releases every DFL entry added via
 * dfl_fpga_enum_info_add_dfl(), the info itself, and the device reference
 * taken in dfl_fpga_enum_info_alloc().  NULL is tolerated.
 */
void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
{
	struct dfl_fpga_enum_dfl *tmp, *dfl;
	struct device *dev;

	if (!info)
		return;

	dev = info->dev;

	/* remove all device feature lists in the list. */
	list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
		list_del(&dfl->node);
		devm_kfree(dev, dfl);
	}

	devm_kfree(dev, info);
	put_device(dev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
824
825/**
826 * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
827 *
828 * @info: ptr to dfl_fpga_enum_info
829 * @start: mmio resource address of the device feature list.
830 * @len: mmio resource length of the device feature list.
831 * @ioaddr: mapped mmio resource address of the device feature list.
832 *
833 * One FPGA device may have one or more Device Feature Lists (DFLs), use this
834 * function to add information of each DFL to common data structure for next
835 * step enumeration.
836 *
837 * Return: 0 on success, negative error code otherwise.
838 */
839int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
840 resource_size_t start, resource_size_t len,
841 void __iomem *ioaddr)
842{
843 struct dfl_fpga_enum_dfl *dfl;
844
845 dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
846 if (!dfl)
847 return -ENOMEM;
848
849 dfl->start = start;
850 dfl->len = len;
851 dfl->ioaddr = ioaddr;
852
853 list_add_tail(&dfl->node, &info->dfls);
854
855 return 0;
856}
857EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
858
/* device_for_each_child() callback: unregister one feature dev */
static int remove_feature_dev(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	/* capture type and id before unregister frees the pdev */
	enum dfl_id_type type = feature_dev_id_type(pdev);
	int id = pdev->id;

	platform_device_unregister(pdev);

	dfl_id_free(type, id);

	return 0;
}

/* unregister every feature dev parented to the container's region */
static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
{
	device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
}
876
/**
 * dfl_fpga_feature_devs_enumerate - enumerate feature devices
 * @info: information for enumeration.
 *
 * This function creates a container device (base FPGA region), enumerates
 * feature devices based on the enumeration info and creates platform devices
 * under the container device.
 *
 * Return: dfl_fpga_cdev struct on success, -errno on failure
 */
struct dfl_fpga_cdev *
dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
{
	struct build_feature_devs_info *binfo;
	struct dfl_fpga_enum_dfl *dfl;
	struct dfl_fpga_cdev *cdev;
	int ret = 0;

	if (!info->dev)
		return ERR_PTR(-ENODEV);

	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	/* the base region acts as parent for all feature platform devices */
	cdev->region = fpga_region_create(info->dev, NULL, NULL);
	if (!cdev->region) {
		ret = -ENOMEM;
		goto free_cdev_exit;
	}

	cdev->parent = info->dev;
	mutex_init(&cdev->lock);
	INIT_LIST_HEAD(&cdev->port_dev_list);

	ret = fpga_region_register(cdev->region);
	if (ret)
		goto free_region_exit;

	/* create and init build info for enumeration */
	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
	if (!binfo) {
		ret = -ENOMEM;
		goto unregister_region_exit;
	}

	binfo->dev = info->dev;
	binfo->cdev = cdev;

	/*
	 * start enumeration for all feature devices based on Device Feature
	 * Lists.
	 */
	list_for_each_entry(dfl, &info->dfls, node) {
		ret = parse_feature_list(binfo, dfl);
		if (ret) {
			/* tear down devices committed by earlier lists too */
			remove_feature_devs(cdev);
			build_info_free(binfo);
			goto unregister_region_exit;
		}
	}

	build_info_free(binfo);

	return cdev;

unregister_region_exit:
	fpga_region_unregister(cdev->region);
free_region_exit:
	fpga_region_free(cdev->region);
free_cdev_exit:
	devm_kfree(info->dev, cdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
952
/**
 * dfl_fpga_feature_devs_remove - remove all feature devices
 * @cdev: fpga container device.
 *
 * Remove the container device and all feature devices under given container
 * devices.
 */
void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
{
	struct dfl_feature_platform_data *pdata, *ptmp;

	remove_feature_devs(cdev);

	mutex_lock(&cdev->lock);
	/* drop the reference taken on the fme dev at enumeration time */
	if (cdev->fme_dev) {
		/* the fme should be unregistered. */
		WARN_ON(device_is_registered(cdev->fme_dev));
		put_device(cdev->fme_dev);
	}

	/* drop the references taken in dfl_fpga_cdev_add_port_dev() */
	list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
		struct platform_device *port_dev = pdata->dev;

		/* the port should be unregistered. */
		WARN_ON(device_is_registered(&port_dev->dev));
		list_del(&pdata->node);
		put_device(&port_dev->dev);
	}
	mutex_unlock(&cdev->lock);

	fpga_region_unregister(cdev->region);
	devm_kfree(cdev->parent, cdev);
}
EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
987
988/**
989 * __dfl_fpga_cdev_find_port - find a port under given container device
990 *
991 * @cdev: container device
992 * @data: data passed to match function
993 * @match: match function used to find specific port from the port device list
994 *
995 * Find a port device under container device. This function needs to be
996 * invoked with lock held.
997 *
998 * Return: pointer to port's platform device if successful, NULL otherwise.
999 *
1000 * NOTE: you will need to drop the device reference with put_device() after use.
1001 */
1002struct platform_device *
1003__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
1004 int (*match)(struct platform_device *, void *))
1005{
1006 struct dfl_feature_platform_data *pdata;
1007 struct platform_device *port_dev;
1008
1009 list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1010 port_dev = pdata->dev;
1011
1012 if (match(port_dev, data) && get_device(&port_dev->dev))
1013 return port_dev;
1014 }
1015
1016 return NULL;
1017}
1018EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
1019
1020static int __init dfl_fpga_init(void)
1021{
1022 int ret;
1023
1024 dfl_ids_init();
1025
1026 ret = dfl_chardev_init();
1027 if (ret)
1028 dfl_ids_destroy();
1029
1030 return ret;
1031}
1032
/* module exit: release chardev regions and id allocators (reverse of init) */
static void __exit dfl_fpga_exit(void)
{
	dfl_chardev_uinit();
	dfl_ids_destroy();
}

module_init(dfl_fpga_init);
module_exit(dfl_fpga_exit);

MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
new file mode 100644
index 000000000000..a8b869e9e5b7
--- /dev/null
+++ b/drivers/fpga/dfl.h
@@ -0,0 +1,410 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Driver Header File for FPGA Device Feature List (DFL) Support
4 *
5 * Copyright (C) 2017-2018 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Kang Luwei <luwei.kang@intel.com>
9 * Zhang Yi <yi.z.zhang@intel.com>
10 * Wu Hao <hao.wu@intel.com>
11 * Xiao Guangrong <guangrong.xiao@linux.intel.com>
12 */
13
14#ifndef __FPGA_DFL_H
15#define __FPGA_DFL_H
16
17#include <linux/bitfield.h>
18#include <linux/cdev.h>
19#include <linux/delay.h>
20#include <linux/fs.h>
21#include <linux/iopoll.h>
22#include <linux/io-64-nonatomic-lo-hi.h>
23#include <linux/platform_device.h>
24#include <linux/slab.h>
25#include <linux/uuid.h>
26#include <linux/fpga/fpga-region.h>
27
28/* maximum supported number of ports */
29#define MAX_DFL_FPGA_PORT_NUM 4
30/* plus one for fme device */
31#define MAX_DFL_FEATURE_DEV_NUM (MAX_DFL_FPGA_PORT_NUM + 1)
32
33/* Reserved 0x0 for Header Group Register and 0xff for AFU */
34#define FEATURE_ID_FIU_HEADER 0x0
35#define FEATURE_ID_AFU 0xff
36
37#define FME_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
38#define FME_FEATURE_ID_THERMAL_MGMT 0x1
39#define FME_FEATURE_ID_POWER_MGMT 0x2
40#define FME_FEATURE_ID_GLOBAL_IPERF 0x3
41#define FME_FEATURE_ID_GLOBAL_ERR 0x4
42#define FME_FEATURE_ID_PR_MGMT 0x5
43#define FME_FEATURE_ID_HSSI 0x6
44#define FME_FEATURE_ID_GLOBAL_DPERF 0x7
45
46#define PORT_FEATURE_ID_HEADER FEATURE_ID_FIU_HEADER
47#define PORT_FEATURE_ID_AFU FEATURE_ID_AFU
48#define PORT_FEATURE_ID_ERROR 0x10
49#define PORT_FEATURE_ID_UMSG 0x11
50#define PORT_FEATURE_ID_UINT 0x12
51#define PORT_FEATURE_ID_STP 0x13
52
53/*
54 * Device Feature Header Register Set
55 *
56 * For FIUs, they all have DFH + GUID + NEXT_AFU as common header registers.
57 * For AFUs, they have DFH + GUID as common header registers.
58 * For private features, they only have DFH register as common header.
59 */
60#define DFH 0x0
61#define GUID_L 0x8
62#define GUID_H 0x10
63#define NEXT_AFU 0x18
64
65#define DFH_SIZE 0x8
66
67/* Device Feature Header Register Bitfield */
68#define DFH_ID GENMASK_ULL(11, 0) /* Feature ID */
69#define DFH_ID_FIU_FME 0
70#define DFH_ID_FIU_PORT 1
71#define DFH_REVISION GENMASK_ULL(15, 12) /* Feature revision */
72#define DFH_NEXT_HDR_OFST GENMASK_ULL(39, 16) /* Offset to next DFH */
73#define DFH_EOL BIT_ULL(40) /* End of list */
74#define DFH_TYPE GENMASK_ULL(63, 60) /* Feature type */
75#define DFH_TYPE_AFU 1
76#define DFH_TYPE_PRIVATE 3
77#define DFH_TYPE_FIU 4
78
79/* Next AFU Register Bitfield */
80#define NEXT_AFU_NEXT_DFH_OFST GENMASK_ULL(23, 0) /* Offset to next AFU */
81
82/* FME Header Register Set */
83#define FME_HDR_DFH DFH
84#define FME_HDR_GUID_L GUID_L
85#define FME_HDR_GUID_H GUID_H
86#define FME_HDR_NEXT_AFU NEXT_AFU
87#define FME_HDR_CAP 0x30
88#define FME_HDR_PORT_OFST(n) (0x38 + ((n) * 0x8))
89#define FME_HDR_BITSTREAM_ID 0x60
90#define FME_HDR_BITSTREAM_MD 0x68
91
92/* FME Fab Capability Register Bitfield */
93#define FME_CAP_FABRIC_VERID GENMASK_ULL(7, 0) /* Fabric version ID */
94#define FME_CAP_SOCKET_ID BIT_ULL(8) /* Socket ID */
95#define FME_CAP_PCIE0_LINK_AVL BIT_ULL(12) /* PCIE0 Link */
96#define FME_CAP_PCIE1_LINK_AVL BIT_ULL(13) /* PCIE1 Link */
97#define FME_CAP_COHR_LINK_AVL BIT_ULL(14) /* Coherent Link */
98#define FME_CAP_IOMMU_AVL BIT_ULL(16) /* IOMMU available */
99#define FME_CAP_NUM_PORTS GENMASK_ULL(19, 17) /* Number of ports */
100#define FME_CAP_ADDR_WIDTH GENMASK_ULL(29, 24) /* Address bus width */
101#define FME_CAP_CACHE_SIZE GENMASK_ULL(43, 32) /* cache size in KB */
102#define FME_CAP_CACHE_ASSOC GENMASK_ULL(47, 44) /* Associativity */
103
104/* FME Port Offset Register Bitfield */
105/* Offset to port device feature header */
106#define FME_PORT_OFST_DFH_OFST GENMASK_ULL(23, 0)
107/* PCI Bar ID for this port */
108#define FME_PORT_OFST_BAR_ID GENMASK_ULL(34, 32)
109/* AFU MMIO access permission. 1 - VF, 0 - PF. */
110#define FME_PORT_OFST_ACC_CTRL BIT_ULL(55)
111#define FME_PORT_OFST_ACC_PF 0
112#define FME_PORT_OFST_ACC_VF 1
113#define FME_PORT_OFST_IMP BIT_ULL(60)
114
115/* PORT Header Register Set */
116#define PORT_HDR_DFH DFH
117#define PORT_HDR_GUID_L GUID_L
118#define PORT_HDR_GUID_H GUID_H
119#define PORT_HDR_NEXT_AFU NEXT_AFU
120#define PORT_HDR_CAP 0x30
121#define PORT_HDR_CTRL 0x38
122
123/* Port Capability Register Bitfield */
124#define PORT_CAP_PORT_NUM GENMASK_ULL(1, 0) /* ID of this port */
125#define PORT_CAP_MMIO_SIZE GENMASK_ULL(23, 8) /* MMIO size in KB */
126#define PORT_CAP_SUPP_INT_NUM GENMASK_ULL(35, 32) /* Interrupts num */
127
128/* Port Control Register Bitfield */
129#define PORT_CTRL_SFTRST BIT_ULL(0) /* Port soft reset */
130/* Latency tolerance reporting. '1' >= 40us, '0' < 40us.*/
131#define PORT_CTRL_LATENCY BIT_ULL(2)
132#define PORT_CTRL_SFTRST_ACK BIT_ULL(4) /* HW ack for reset */
133/**
134 * struct dfl_fpga_port_ops - port ops
135 *
136 * @name: name of this port ops, to match with port platform device.
137 * @owner: pointer to the module which owns this port ops.
138 * @node: node to link port ops to global list.
139 * @get_id: get port id from hardware.
140 * @enable_set: enable/disable the port.
141 */
142struct dfl_fpga_port_ops {
143 const char *name;
144 struct module *owner;
145 struct list_head node;
146 int (*get_id)(struct platform_device *pdev);
147 int (*enable_set)(struct platform_device *pdev, bool enable);
148};
149
150void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops);
151void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops);
152struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev);
153void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops);
154int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id);
155
156/**
157 * struct dfl_feature_driver - sub feature's driver
158 *
159 * @id: sub feature id.
160 * @ops: ops of this sub feature.
161 */
162struct dfl_feature_driver {
163 u64 id;
164 const struct dfl_feature_ops *ops;
165};
166
167/**
168 * struct dfl_feature - sub feature of the feature devices
169 *
170 * @id: sub feature id.
171 * @resource_index: each sub feature has one mmio resource for its registers.
172 * this index is used to find its mmio resource from the
173 * feature dev (platform device)'s reources.
174 * @ioaddr: mapped mmio resource address.
175 * @ops: ops of this sub feature.
176 */
177struct dfl_feature {
178 u64 id;
179 int resource_index;
180 void __iomem *ioaddr;
181 const struct dfl_feature_ops *ops;
182};
183
184#define DEV_STATUS_IN_USE 0
185
186/**
187 * struct dfl_feature_platform_data - platform data for feature devices
188 *
189 * @node: node to link feature devs to container device's port_dev_list.
190 * @lock: mutex to protect platform data.
191 * @cdev: cdev of feature dev.
192 * @dev: ptr to platform device linked with this platform data.
193 * @dfl_cdev: ptr to container device.
194 * @disable_count: count for port disable.
195 * @num: number for sub features.
196 * @dev_status: dev status (e.g. DEV_STATUS_IN_USE).
197 * @private: ptr to feature dev private data.
198 * @features: sub features of this feature dev.
199 */
200struct dfl_feature_platform_data {
201 struct list_head node;
202 struct mutex lock;
203 struct cdev cdev;
204 struct platform_device *dev;
205 struct dfl_fpga_cdev *dfl_cdev;
206 unsigned int disable_count;
207 unsigned long dev_status;
208 void *private;
209 int num;
210 struct dfl_feature features[0];
211};
212
213static inline
214int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata)
215{
216 /* Test and set IN_USE flags to ensure file is exclusively used */
217 if (test_and_set_bit_lock(DEV_STATUS_IN_USE, &pdata->dev_status))
218 return -EBUSY;
219
220 return 0;
221}
222
223static inline
224void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata)
225{
226 clear_bit_unlock(DEV_STATUS_IN_USE, &pdata->dev_status);
227}
228
229static inline
230void dfl_fpga_pdata_set_private(struct dfl_feature_platform_data *pdata,
231 void *private)
232{
233 pdata->private = private;
234}
235
236static inline
237void *dfl_fpga_pdata_get_private(struct dfl_feature_platform_data *pdata)
238{
239 return pdata->private;
240}
241
/*
 * Per-sub-feature operations: init/uinit bracket the sub feature's
 * lifetime on its feature device; ioctl handles feature-specific
 * commands on the feature device's char device.
 */
struct dfl_feature_ops {
	int (*init)(struct platform_device *pdev, struct dfl_feature *feature);
	void (*uinit)(struct platform_device *pdev,
		      struct dfl_feature *feature);
	long (*ioctl)(struct platform_device *pdev, struct dfl_feature *feature,
		      unsigned int cmd, unsigned long arg);
};
249
250#define DFL_FPGA_FEATURE_DEV_FME "dfl-fme"
251#define DFL_FPGA_FEATURE_DEV_PORT "dfl-port"
252
253static inline int dfl_feature_platform_data_size(const int num)
254{
255 return sizeof(struct dfl_feature_platform_data) +
256 num * sizeof(struct dfl_feature);
257}
258
259void dfl_fpga_dev_feature_uinit(struct platform_device *pdev);
260int dfl_fpga_dev_feature_init(struct platform_device *pdev,
261 struct dfl_feature_driver *feature_drvs);
262
263int dfl_fpga_dev_ops_register(struct platform_device *pdev,
264 const struct file_operations *fops,
265 struct module *owner);
266void dfl_fpga_dev_ops_unregister(struct platform_device *pdev);
267
268static inline
269struct platform_device *dfl_fpga_inode_to_feature_dev(struct inode *inode)
270{
271 struct dfl_feature_platform_data *pdata;
272
273 pdata = container_of(inode->i_cdev, struct dfl_feature_platform_data,
274 cdev);
275 return pdata->dev;
276}
277
278#define dfl_fpga_dev_for_each_feature(pdata, feature) \
279 for ((feature) = (pdata)->features; \
280 (feature) < (pdata)->features + (pdata)->num; (feature)++)
281
282static inline
283struct dfl_feature *dfl_get_feature_by_id(struct device *dev, u64 id)
284{
285 struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
286 struct dfl_feature *feature;
287
288 dfl_fpga_dev_for_each_feature(pdata, feature)
289 if (feature->id == id)
290 return feature;
291
292 return NULL;
293}
294
295static inline
296void __iomem *dfl_get_feature_ioaddr_by_id(struct device *dev, u64 id)
297{
298 struct dfl_feature *feature = dfl_get_feature_by_id(dev, id);
299
300 if (feature && feature->ioaddr)
301 return feature->ioaddr;
302
303 WARN_ON(1);
304 return NULL;
305}
306
307static inline bool is_dfl_feature_present(struct device *dev, u64 id)
308{
309 return !!dfl_get_feature_ioaddr_by_id(dev, id);
310}
311
312static inline
313struct device *dfl_fpga_pdata_to_parent(struct dfl_feature_platform_data *pdata)
314{
315 return pdata->dev->dev.parent->parent;
316}
317
318static inline bool dfl_feature_is_fme(void __iomem *base)
319{
320 u64 v = readq(base + DFH);
321
322 return (FIELD_GET(DFH_TYPE, v) == DFH_TYPE_FIU) &&
323 (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_FME);
324}
325
326static inline bool dfl_feature_is_port(void __iomem *base)
327{
328 u64 v = readq(base + DFH);
329
330 return (FIELD_GET(DFH_TYPE, v) == DFH_TYPE_FIU) &&
331 (FIELD_GET(DFH_ID, v) == DFH_ID_FIU_PORT);
332}
333
334/**
335 * struct dfl_fpga_enum_info - DFL FPGA enumeration information
336 *
337 * @dev: parent device.
338 * @dfls: list of device feature lists.
339 */
340struct dfl_fpga_enum_info {
341 struct device *dev;
342 struct list_head dfls;
343};
344
345/**
346 * struct dfl_fpga_enum_dfl - DFL FPGA enumeration device feature list info
347 *
348 * @start: base address of this device feature list.
349 * @len: size of this device feature list.
350 * @ioaddr: mapped base address of this device feature list.
351 * @node: node in list of device feature lists.
352 */
353struct dfl_fpga_enum_dfl {
354 resource_size_t start;
355 resource_size_t len;
356
357 void __iomem *ioaddr;
358
359 struct list_head node;
360};
361
362struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev);
363int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
364 resource_size_t start, resource_size_t len,
365 void __iomem *ioaddr);
366void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info);
367
368/**
369 * struct dfl_fpga_cdev - container device of DFL based FPGA
370 *
371 * @parent: parent device of this container device.
372 * @region: base fpga region.
373 * @fme_dev: FME feature device under this container device.
374 * @lock: mutex lock to protect the port device list.
375 * @port_dev_list: list of all port feature devices under this container device.
376 */
377struct dfl_fpga_cdev {
378 struct device *parent;
379 struct fpga_region *region;
380 struct device *fme_dev;
381 struct mutex lock;
382 struct list_head port_dev_list;
383};
384
385struct dfl_fpga_cdev *
386dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info);
387void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev);
388
389/*
390 * need to drop the device reference with put_device() after use port platform
391 * device returned by __dfl_fpga_cdev_find_port and dfl_fpga_cdev_find_port
392 * functions.
393 */
394struct platform_device *
395__dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
396 int (*match)(struct platform_device *, void *));
397
398static inline struct platform_device *
399dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
400 int (*match)(struct platform_device *, void *))
401{
402 struct platform_device *pdev;
403
404 mutex_lock(&cdev->lock);
405 pdev = __dfl_fpga_cdev_find_port(cdev, data, match);
406 mutex_unlock(&cdev->lock);
407
408 return pdev;
409}
410#endif /* __FPGA_DFL_H */
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index c1564cf827fe..a41b07e37884 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -406,12 +406,40 @@ static ssize_t state_show(struct device *dev,
406 return sprintf(buf, "%s\n", state_str[mgr->state]); 406 return sprintf(buf, "%s\n", state_str[mgr->state]);
407} 407}
408 408
409static ssize_t status_show(struct device *dev,
410 struct device_attribute *attr, char *buf)
411{
412 struct fpga_manager *mgr = to_fpga_manager(dev);
413 u64 status;
414 int len = 0;
415
416 if (!mgr->mops->status)
417 return -ENOENT;
418
419 status = mgr->mops->status(mgr);
420
421 if (status & FPGA_MGR_STATUS_OPERATION_ERR)
422 len += sprintf(buf + len, "reconfig operation error\n");
423 if (status & FPGA_MGR_STATUS_CRC_ERR)
424 len += sprintf(buf + len, "reconfig CRC error\n");
425 if (status & FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR)
426 len += sprintf(buf + len, "reconfig incompatible image\n");
427 if (status & FPGA_MGR_STATUS_IP_PROTOCOL_ERR)
428 len += sprintf(buf + len, "reconfig IP protocol error\n");
429 if (status & FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR)
430 len += sprintf(buf + len, "reconfig fifo overflow error\n");
431
432 return len;
433}
434
409static DEVICE_ATTR_RO(name); 435static DEVICE_ATTR_RO(name);
410static DEVICE_ATTR_RO(state); 436static DEVICE_ATTR_RO(state);
437static DEVICE_ATTR_RO(status);
411 438
412static struct attribute *fpga_mgr_attrs[] = { 439static struct attribute *fpga_mgr_attrs[] = {
413 &dev_attr_name.attr, 440 &dev_attr_name.attr,
414 &dev_attr_state.attr, 441 &dev_attr_state.attr,
442 &dev_attr_status.attr,
415 NULL, 443 NULL,
416}; 444};
417ATTRIBUTE_GROUPS(fpga_mgr); 445ATTRIBUTE_GROUPS(fpga_mgr);
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 6d214d75c7be..0d65220d5ec5 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -158,6 +158,27 @@ err_put_region:
158} 158}
159EXPORT_SYMBOL_GPL(fpga_region_program_fpga); 159EXPORT_SYMBOL_GPL(fpga_region_program_fpga);
160 160
161static ssize_t compat_id_show(struct device *dev,
162 struct device_attribute *attr, char *buf)
163{
164 struct fpga_region *region = to_fpga_region(dev);
165
166 if (!region->compat_id)
167 return -ENOENT;
168
169 return sprintf(buf, "%016llx%016llx\n",
170 (unsigned long long)region->compat_id->id_h,
171 (unsigned long long)region->compat_id->id_l);
172}
173
174static DEVICE_ATTR_RO(compat_id);
175
176static struct attribute *fpga_region_attrs[] = {
177 &dev_attr_compat_id.attr,
178 NULL,
179};
180ATTRIBUTE_GROUPS(fpga_region);
181
161/** 182/**
162 * fpga_region_create - alloc and init a struct fpga_region 183 * fpga_region_create - alloc and init a struct fpga_region
163 * @dev: device parent 184 * @dev: device parent
@@ -258,6 +279,7 @@ static int __init fpga_region_init(void)
258 if (IS_ERR(fpga_region_class)) 279 if (IS_ERR(fpga_region_class))
259 return PTR_ERR(fpga_region_class); 280 return PTR_ERR(fpga_region_class);
260 281
282 fpga_region_class->dev_groups = fpga_region_groups;
261 fpga_region_class->dev_release = fpga_region_dev_release; 283 fpga_region_class->dev_release = fpga_region_dev_release;
262 284
263 return 0; 285 return 0;