Diffstat (limited to 'drivers/pci/p2pdma.c')
-rw-r--r--	drivers/pci/p2pdma.c	805
1 files changed, 805 insertions, 0 deletions
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
new file mode 100644
index 000000000000..ae3c5b25dcc7
--- /dev/null
+++ b/drivers/pci/p2pdma.c
@@ -0,0 +1,805 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>

struct pci_p2pdma {
	struct percpu_ref devmap_ref;
	struct completion devmap_ref_done;
	struct gen_pool *pool;
	bool p2pmem_published;
};

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t size = 0;

	if (pdev->p2pdma->pool)
		size = gen_pool_size(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t avail = 0;

	if (pdev->p2pdma->pool)
		avail = gen_pool_avail(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			pdev->p2pdma->p2pmem_published);
}
static DEVICE_ATTR_RO(published);

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.name = "p2pmem",
};

static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
{
	struct pci_p2pdma *p2p =
		container_of(ref, struct pci_p2pdma, devmap_ref);

	complete_all(&p2p->devmap_ref_done);
}

static void pci_p2pdma_percpu_kill(void *data)
{
	struct percpu_ref *ref = data;

	/*
	 * pci_p2pdma_add_resource() may be called multiple times
	 * by a driver and may register the percpu_kill devm action multiple
	 * times. We only want the first action to actually kill the
	 * percpu_ref.
	 */
	if (percpu_ref_is_dying(ref))
		return;

	percpu_ref_kill(ref);
}

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;

	if (!pdev->p2pdma)
		return;

	wait_for_completion(&pdev->p2pdma->devmap_ref_done);
	percpu_ref_exit(&pdev->p2pdma->devmap_ref);

	gen_pool_destroy(pdev->p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
	pdev->p2pdma = NULL;
}

static int pci_p2pdma_setup(struct pci_dev *pdev)
{
	int error = -ENOMEM;
	struct pci_p2pdma *p2p;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2p->pool)
		goto out;

	init_completion(&p2p->devmap_ref_done);
	error = percpu_ref_init(&p2p->devmap_ref,
			pci_p2pdma_percpu_release, 0, GFP_KERNEL);
	if (error)
		goto out_pool_destroy;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (error)
		goto out_pool_destroy;

	pdev->p2pdma = p2p;

	error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (error)
		goto out_pool_destroy;

	return 0;

out_pool_destroy:
	pdev->p2pdma = NULL;
	gen_pool_destroy(p2p->pool);
out:
	devm_kfree(&pdev->dev, p2p);
	return error;
}

/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct dev_pagemap *pgmap;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!pdev->p2pdma) {
		error = pci_p2pdma_setup(pdev);
		if (error)
			return error;
	}

	pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		return -ENOMEM;

	pgmap->res.start = pci_resource_start(pdev, bar) + offset;
	pgmap->res.end = pgmap->res.start + size - 1;
	pgmap->res.flags = pci_resource_flags(pdev, bar);
	pgmap->ref = &pdev->p2pdma->devmap_ref;
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
	pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
		pci_resource_start(pdev, bar);

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			resource_size(&pgmap->res), dev_to_node(&pdev->dev));
	if (error)
		goto pgmap_free;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_percpu_kill,
					 &pdev->p2pdma->devmap_ref);
	if (error)
		goto pgmap_free;

	pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
		 &pgmap->res);

	return 0;

pgmap_free:
	devm_kfree(&pdev->dev, pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
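
/*
 * Usage sketch (not part of this file; the "foo" driver and its choice of
 * BAR 4 are hypothetical): a provider driver would typically register its
 * memory from probe and then publish it:
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int error;
 *
 *		error = pcim_enable_device(pdev);
 *		if (error)
 *			return error;
 *
 *		error = pci_p2pdma_add_resource(pdev, 4, 0, 0);
 *		if (error)
 *			return error;
 *
 *		pci_p2pmem_publish(pdev, true);
 *		return 0;
 *	}
 *
 * The devm actions registered above tear everything down automatically
 * when the device is unbound.
 */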

/*
 * Note this function returns the parent PCI device with a
 * reference taken. It is the caller's responsibility to drop
 * the reference.
 */
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

/*
 * Find the distance through the nearest common upstream bridge between
 * two PCI devices.
 *
 * If the two devices are the same device then 0 will be returned.
 *
 * If there are two virtual functions of the same device behind the same
 * bridge port then 2 will be returned (one step down to the PCIe switch,
 * then one step back to the same device).
 *
 * In the case where two devices are connected to the same PCIe switch, the
 * value 4 will be returned. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port
 *       + \- Device A
 *       \-+ Switch Downstream Port
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A through the downstream
 * port of the switch, to the common upstream port, back up to the second
 * downstream port and then to Device B.
 *
 * Any two devices that don't have a common upstream bridge will return -1.
 * In this way devices on separate PCIe root ports will be rejected, which
 * is what we want for peer-to-peer since each PCIe root port defines a
 * separate hierarchy domain and there's no way to determine whether the root
 * complex supports forwarding between them.
 *
 * In the case where two devices are connected to different PCIe switches,
 * this function will still return a positive distance as long as both
 * switches eventually have a common upstream bridge. Note this covers
 * the case of using multiple PCIe switches to achieve a desired level of
 * fan-out from a root port. The exact distance will be a function of the
 * number of switches between Device A and Device B.
 *
 * If a bridge which has any ACS redirection bits set is in the path
 * then this function will return -2. This is so we reject any
 * cases where the TLPs are forwarded up into the root complex.
 * In this case, a list of all infringing bridge addresses will be
 * populated in acs_list (assuming it's non-null) for printk purposes.
 */
static int upstream_bridge_distance(struct pci_dev *a,
				    struct pci_dev *b,
				    struct seq_buf *acs_list)
{
	int dist_a = 0;
	int dist_b = 0;
	struct pci_dev *bb = NULL;
	int acs_cnt = 0;

	/*
	 * Note, we don't need to take references to devices returned by
	 * pci_upstream_bridge() since we hold a reference to a child
	 * device which will already hold a reference to the upstream bridge.
	 */

	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	return -1;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	if (acs_cnt)
		return -2;

	return dist_a + dist_b;
}

static int upstream_bridge_distance_warn(struct pci_dev *provider,
					 struct pci_dev *client)
{
	struct seq_buf acs_list;
	int ret;

	seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!acs_list.buffer)
		return -ENOMEM;

	ret = upstream_bridge_distance(provider, client, &acs_list);
	if (ret == -2) {
		pci_warn(client, "cannot be used for peer-to-peer DMA as ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		/* Drop final semicolon */
		acs_list.buffer[acs_list.len-1] = 0;
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);

	} else if (ret < 0) {
		pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge\n",
			 pci_name(provider));
	}

	kfree(acs_list.buffer);

	return ret;
}

/**
 * pci_p2pdma_distance_many - determine the cumulative distance between
 *	a p2pdma provider and the clients in use.
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible (behind the same
 * root port as the provider), otherwise returns a positive number where
 * a lower number is the preferable choice. (If there's one client
 * that's the same as the provider it will return 0, which is the best
 * choice).
 *
 * For now, "compatible" means the provider and the clients are all behind
 * the same PCI root port. This cuts out cases that may work but is safest
 * for the user. Future work can expand this to white-list root complexes
 * that can safely forward between their ports.
 */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	bool not_supported = false;
	struct pci_dev *pci_client;
	int distance = 0;
	int i, ret;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		if (verbose)
			ret = upstream_bridge_distance_warn(provider,
							    pci_client);
		else
			ret = upstream_bridge_distance(provider, pci_client,
						       NULL);

		pci_dev_put(pci_client);

		if (ret < 0)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		distance += ret;
	}

	if (not_supported)
		return -1;

	return distance;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
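
/*
 * Usage sketch (the client device pointers are hypothetical): an
 * orchestrating driver can verify that a provider is usable by every
 * device that will touch the memory before committing to peer-to-peer
 * transfers:
 *
 *	struct device *clients[] = { &nvme_pdev->dev, &rnic_pdev->dev };
 *	int dist;
 *
 *	dist = pci_p2pdma_distance_many(provider, clients,
 *					ARRAY_SIZE(clients), true);
 *	if (dist < 0)
 *		return -EINVAL;
 *
 * A negative distance means at least one client cannot safely reach the
 * provider, so the caller should fall back to regular system memory.
 */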

/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */
bool pci_has_p2pmem(struct pci_dev *pdev)
{
	return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);

/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible
 *	with the specified list of clients and shortest distance (as
 *	determined by pci_p2pdma_distance_many())
 * @clients: array of devices to check (NULL-terminated)
 * @num_clients: number of client devices in the list
 *
 * If multiple devices are behind the same switch, the one "closest" to the
 * client devices in use will be chosen first. (So if one of the providers is
 * the same as one of the clients, that provider will be used ahead of any
 * other providers that are unrelated). If multiple providers are an equal
 * distance away, one will be chosen at random.
 *
 * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
 * to return the reference) or NULL if no compatible device is found. The
 * found provider will also be assigned to the client list.
 */
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
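
/*
 * Usage sketch (continuing the hypothetical clients[] array from the
 * previous example):
 *
 *	struct pci_dev *p2p_dev;
 *
 *	p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
 *	if (!p2p_dev)
 *		return -ENODEV;
 *
 * The caller owns a reference to the returned device and must drop it
 * with pci_dev_put() once it is finished with the memory.
 */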

/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret;

	if (unlikely(!pdev->p2pdma))
		return NULL;

	if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref)))
		return NULL;

	ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size);

	if (unlikely(!ret))
		percpu_ref_put(&pdev->p2pdma->devmap_ref);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
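
/*
 * Usage sketch (the buffer size is arbitrary): allocations come from the
 * provider's gen_pool and pin the devmap reference until they are freed:
 *
 *	void *buf;
 *
 *	buf = pci_alloc_p2pmem(p2p_dev, SZ_4K);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	pci_free_p2pmem(p2p_dev, buf, SZ_4K);
 */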

/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size);
	percpu_ref_put(&pdev->p2pdma->devmap_ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);

/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	if (!addr)
		return 0;
	if (!pdev->p2pdma)
		return 0;

	/*
	 * Note: when we added the memory to the pool we used the PCI
	 * bus address as the physical address. So gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
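
/*
 * Usage sketch (FOO_DMA_ADDR and the ioremapped "regs" pointer are
 * hypothetical): the bus address is what a peer device should be
 * programmed with, e.g. when building a DMA descriptor:
 *
 *	pci_bus_addr_t bus_addr;
 *
 *	bus_addr = pci_p2pmem_virt_to_bus(p2p_dev, buf);
 *	writeq(bus_addr, regs + FOO_DMA_ADDR);
 */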

/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: returns the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Returns the allocated scatterlist or NULL on error.
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);

/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device the memory was allocated from
 * @sgl: the allocated scatterlist
 */
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);
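
/*
 * Usage sketch: the two helpers above pair up for single-entry
 * scatterlists backed by p2p memory:
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents;
 *
 *	sgl = pci_p2pmem_alloc_sgl(p2p_dev, &nents, SZ_64K);
 *	if (!sgl)
 *		return -ENOMEM;
 *	...
 *	pci_p2pmem_free_sgl(p2p_dev, sgl);
 */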

/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-to-peer DMA operations. Non-published memory is reserved for
 * exclusive use of the device driver that registers the peer-to-peer
 * memory.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	if (pdev->p2pdma)
		pdev->p2pdma->p2pmem_published = publish;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
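
/*
 * Usage sketch (foo_remove() is hypothetical): a provider typically
 * publishes from probe, as in the earlier example, and unpublishes
 * before tearing down:
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_p2pmem_publish(pdev, false);
 *	}
 */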

/**
 * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA
 * @dev: device doing the DMA request
 * @sg: scatter list to map
 * @nents: elements in the scatterlist
 * @dir: DMA direction
 *
 * Scatterlists mapped with this function should not be unmapped in any way.
 *
 * Returns the number of SG entries mapped or 0 on error.
 */
int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir)
{
	struct dev_pagemap *pgmap;
	struct scatterlist *s;
	phys_addr_t paddr;
	int i;

	/*
	 * p2pdma mappings are not compatible with devices that use
	 * dma_virt_ops. If the upper layers do the right thing
	 * this should never happen because it will be prevented
	 * by the check in pci_p2pdma_add_client()
	 */
	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
			 dev->dma_ops == &dma_virt_ops))
		return 0;

	for_each_sg(sg, s, nents, i) {
		pgmap = sg_page(s)->pgmap;
		paddr = sg_phys(s);

		s->dma_address = paddr - pgmap->pci_p2pdma_bus_offset;
		sg_dma_len(s) = s->length;
	}

	return nents;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg);
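
/*
 * Usage sketch (dma_dev stands for the device that will actually issue
 * the DMA, e.g. a DMA engine's struct device): map the scatterlist just
 * before handing it to the hardware:
 *
 *	nents = pci_p2pdma_map_sg(dma_dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -EIO;
 */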

/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *	to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *	(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device
 * name) or a boolean (in any format strtobool() accepts). A false
 * value disables p2pdma; a true value expects the caller to
 * automatically find a compatible device; and specifying a PCI device
 * expects the caller to use that specific provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success
 */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device that doesn't exist
		 * like "0000:01:00.1", we don't want strtobool to think
		 * it's a '0' when it's clearly not what the user wanted.
		 * So we require 0's and 1's to be exactly one character.
		 */
	} else if (!strtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);
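
/*
 * Usage sketch (the foo_port container, to_foo_port() and the attribute
 * wiring are hypothetical): a configfs store operation can delegate its
 * parsing entirely to this helper:
 *
 *	static ssize_t foo_port_p2pmem_store(struct config_item *item,
 *					     const char *page, size_t count)
 *	{
 *		struct foo_port *port = to_foo_port(item);
 *		int error;
 *
 *		error = pci_p2pdma_enable_store(page, &port->p2p_dev,
 *						&port->use_p2pmem);
 *		if (error)
 *			return error;
 *
 *		return count;
 *	}
 */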

/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *	whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * to show the value of the attribute.
 *
 * Returns the number of bytes printed to @page
 */
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
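
/*
 * Usage sketch (same hypothetical foo_port as above): the matching show
 * operation is a one-liner:
 *
 *	static ssize_t foo_port_p2pmem_show(struct config_item *item,
 *					    char *page)
 *	{
 *		struct foo_port *port = to_foo_port(item);
 *
 *		return pci_p2pdma_enable_show(page, port->p2p_dev,
 *					      port->use_p2pmem);
 *	}
 */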