Diffstat (limited to 'drivers/pci/p2pdma.c')
 drivers/pci/p2pdma.c | 115 ++++++++++++++++++++++++++++++++-------------------
 1 file changed, 72 insertions(+), 43 deletions(-)
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 742928d0053e..a98126ad9c3a 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -20,12 +20,16 @@
 #include <linux/seq_buf.h>
 
 struct pci_p2pdma {
-        struct percpu_ref devmap_ref;
-        struct completion devmap_ref_done;
         struct gen_pool *pool;
         bool p2pmem_published;
 };
 
+struct p2pdma_pagemap {
+        struct dev_pagemap pgmap;
+        struct percpu_ref ref;
+        struct completion ref_done;
+};
+
 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
                 char *buf)
 {
@@ -74,41 +78,45 @@ static const struct attribute_group p2pmem_group = {
         .name = "p2pmem",
 };
 
+static struct p2pdma_pagemap *to_p2p_pgmap(struct percpu_ref *ref)
+{
+        return container_of(ref, struct p2pdma_pagemap, ref);
+}
+
 static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
 {
-        struct pci_p2pdma *p2p =
-                container_of(ref, struct pci_p2pdma, devmap_ref);
+        struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
 
-        complete_all(&p2p->devmap_ref_done);
+        complete(&p2p_pgmap->ref_done);
 }
 
 static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
 {
-        /*
-         * pci_p2pdma_add_resource() may be called multiple times
-         * by a driver and may register the percpu_kill devm action multiple
-         * times. We only want the first action to actually kill the
-         * percpu_ref.
-         */
-        if (percpu_ref_is_dying(ref))
-                return;
-
         percpu_ref_kill(ref);
 }
 
+static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref)
+{
+        struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
+
+        wait_for_completion(&p2p_pgmap->ref_done);
+        percpu_ref_exit(&p2p_pgmap->ref);
+}
+
 static void pci_p2pdma_release(void *data)
 {
         struct pci_dev *pdev = data;
+        struct pci_p2pdma *p2pdma = pdev->p2pdma;
 
-        if (!pdev->p2pdma)
+        if (!p2pdma)
                 return;
 
-        wait_for_completion(&pdev->p2pdma->devmap_ref_done);
-        percpu_ref_exit(&pdev->p2pdma->devmap_ref);
+        /* Flush and disable pci_alloc_p2p_mem() */
+        pdev->p2pdma = NULL;
+        synchronize_rcu();
 
-        gen_pool_destroy(pdev->p2pdma->pool);
+        gen_pool_destroy(p2pdma->pool);
         sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
-        pdev->p2pdma = NULL;
 }
 
 static int pci_p2pdma_setup(struct pci_dev *pdev)
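For reference, a minimal sketch (not part of the patch) of the percpu_ref lifecycle this hunk moves into struct p2pdma_pagemap: the release callback only signals a completion, the kill callback drops the initial reference, and the cleanup path waits for any outstanding users before tearing the ref down. The my_* names are hypothetical.

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

struct my_pagemap {
        struct percpu_ref ref;
        struct completion ref_done;
};

/* Called once the last reference is dropped after percpu_ref_kill(). */
static void my_ref_release(struct percpu_ref *ref)
{
        struct my_pagemap *p = container_of(ref, struct my_pagemap, ref);

        complete(&p->ref_done);
}

static int my_pagemap_init(struct my_pagemap *p)
{
        init_completion(&p->ref_done);
        return percpu_ref_init(&p->ref, my_ref_release, 0, GFP_KERNEL);
}

static void my_pagemap_teardown(struct my_pagemap *p)
{
        percpu_ref_kill(&p->ref);               /* drop the initial reference */
        wait_for_completion(&p->ref_done);      /* wait for remaining users */
        percpu_ref_exit(&p->ref);               /* free the percpu counters */
}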
@@ -124,12 +132,6 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
         if (!p2p->pool)
                 goto out;
 
-        init_completion(&p2p->devmap_ref_done);
-        error = percpu_ref_init(&p2p->devmap_ref,
-                        pci_p2pdma_percpu_release, 0, GFP_KERNEL);
-        if (error)
-                goto out_pool_destroy;
-
         error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
         if (error)
                 goto out_pool_destroy;
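The hunk keeps relying on devm_add_action_or_reset(): the registered action runs automatically when the device is unbound, and if registration itself fails the action is invoked immediately, so the caller never unwinds by hand. A rough sketch of that pattern with hypothetical my_* names:

#include <linux/device.h>
#include <linux/slab.h>

struct my_ctx {
        void *buffer;
};

static void my_release(void *data)
{
        struct my_ctx *ctx = data;

        kfree(ctx->buffer);     /* undo what my_setup() allocated */
}

static int my_setup(struct device *dev, struct my_ctx *ctx)
{
        ctx->buffer = kzalloc(64, GFP_KERNEL);
        if (!ctx->buffer)
                return -ENOMEM;

        /* Runs my_release(ctx) on unbind; on failure it runs it right away. */
        return devm_add_action_or_reset(dev, my_release, ctx);
}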
@@ -163,6 +165,7 @@ out:
 int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
                 u64 offset)
 {
+        struct p2pdma_pagemap *p2p_pgmap;
         struct dev_pagemap *pgmap;
         void *addr;
         int error;
@@ -185,18 +188,27 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
                 return error;
         }
 
-        pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
-        if (!pgmap)
+        p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
+        if (!p2p_pgmap)
                 return -ENOMEM;
 
+        init_completion(&p2p_pgmap->ref_done);
+        error = percpu_ref_init(&p2p_pgmap->ref,
+                        pci_p2pdma_percpu_release, 0, GFP_KERNEL);
+        if (error)
+                goto pgmap_free;
+
+        pgmap = &p2p_pgmap->pgmap;
+
         pgmap->res.start = pci_resource_start(pdev, bar) + offset;
         pgmap->res.end = pgmap->res.start + size - 1;
         pgmap->res.flags = pci_resource_flags(pdev, bar);
-        pgmap->ref = &pdev->p2pdma->devmap_ref;
+        pgmap->ref = &p2p_pgmap->ref;
         pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
         pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
                 pci_resource_start(pdev, bar);
         pgmap->kill = pci_p2pdma_percpu_kill;
+        pgmap->cleanup = pci_p2pdma_percpu_cleanup;
 
         addr = devm_memremap_pages(&pdev->dev, pgmap);
         if (IS_ERR(addr)) {
@@ -204,19 +216,22 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
                 goto pgmap_free;
         }
 
-        error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr,
-                        pci_bus_address(pdev, bar) + offset,
-                        resource_size(&pgmap->res), dev_to_node(&pdev->dev));
+        error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
+                        pci_bus_address(pdev, bar) + offset,
+                        resource_size(&pgmap->res), dev_to_node(&pdev->dev),
+                        &p2p_pgmap->ref);
         if (error)
-                goto pgmap_free;
+                goto pages_free;
 
         pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
                  &pgmap->res);
 
         return 0;
 
+pages_free:
+        devm_memunmap_pages(&pdev->dev, pgmap);
 pgmap_free:
-        devm_kfree(&pdev->dev, pgmap);
+        devm_kfree(&pdev->dev, p2p_pgmap);
         return error;
 }
 EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
@@ -585,19 +600,30 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
  */
 void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
 {
-        void *ret;
+        void *ret = NULL;
+        struct percpu_ref *ref;
 
+        /*
+         * Pairs with synchronize_rcu() in pci_p2pdma_release() to
+         * ensure pdev->p2pdma is non-NULL for the duration of the
+         * read-lock.
+         */
+        rcu_read_lock();
         if (unlikely(!pdev->p2pdma))
-                return NULL;
-
-        if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref)))
-                return NULL;
-
-        ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size);
+                goto out;
 
-        if (unlikely(!ret))
-                percpu_ref_put(&pdev->p2pdma->devmap_ref);
+        ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
+                        (void **) &ref);
+        if (!ret)
+                goto out;
 
+        if (unlikely(!percpu_ref_tryget_live(ref))) {
+                gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
+                ret = NULL;
+                goto out;
+        }
+out:
+        rcu_read_unlock();
         return ret;
 }
 EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
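The comment added in this hunk describes a standard RCU pattern: the release path publishes NULL and then calls synchronize_rcu(), so any reader that saw a non-NULL pointer inside rcu_read_lock()/rcu_read_unlock() is guaranteed to finish before the backing data goes away. A minimal sketch of that pairing follows; it uses hypothetical my_* names and the annotated RCU accessors, whereas the patch itself stores pdev->p2pdma as a plain pointer.

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_state {
        int value;
};

struct my_dev {
        struct my_state __rcu *state;
};

static int my_read(struct my_dev *d)
{
        struct my_state *s;
        int ret = -ENODEV;

        rcu_read_lock();
        s = rcu_dereference(d->state);
        if (s)
                ret = s->value;         /* s stays valid until rcu_read_unlock() */
        rcu_read_unlock();

        return ret;
}

static void my_release(struct my_dev *d)
{
        struct my_state *s = rcu_dereference_protected(d->state, true);

        RCU_INIT_POINTER(d->state, NULL);       /* new readers observe NULL */
        synchronize_rcu();                      /* wait out readers already inside */
        kfree(s);                               /* now no reader can still hold s */
}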
@@ -610,8 +636,11 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
  */
 void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
 {
-        gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size);
-        percpu_ref_put(&pdev->p2pdma->devmap_ref);
+        struct percpu_ref *ref;
+
+        gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
+                        (void **) &ref);
+        percpu_ref_put(ref);
 }
 EXPORT_SYMBOL_GPL(pci_free_p2pmem);
 
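The genalloc *_owner variants used in this patch let a per-chunk cookie travel with the memory: gen_pool_add_owner() records it when the chunk is added, and gen_pool_alloc_owner()/gen_pool_free_owner() report which chunk an allocation came from. A rough usage sketch with hypothetical my_* helpers, following the signatures seen above:

#include <linux/genalloc.h>
#include <linux/percpu-refcount.h>

/* Register a chunk whose allocations should pin @ref. */
static int my_add_chunk(struct gen_pool *pool, void *vaddr, phys_addr_t phys,
                        size_t size, int nid, struct percpu_ref *ref)
{
        return gen_pool_add_owner(pool, (unsigned long)vaddr, phys, size,
                        nid, ref);
}

/* Allocate and take a reference on the owning chunk's percpu_ref. */
static void *my_alloc(struct gen_pool *pool, size_t size)
{
        struct percpu_ref *ref;
        void *addr;

        addr = (void *)gen_pool_alloc_owner(pool, size, (void **)&ref);
        if (!addr)
                return NULL;

        if (!percpu_ref_tryget_live(ref)) {
                gen_pool_free(pool, (unsigned long)addr, size);
                return NULL;
        }

        return addr;
}

/* Free and drop the reference on whichever chunk the memory belonged to. */
static void my_free(struct gen_pool *pool, void *addr, size_t size)
{
        struct percpu_ref *ref;

        gen_pool_free_owner(pool, (unsigned long)addr, size, (void **)&ref);
        percpu_ref_put(ref);
}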