author		Joshua Bakita <bakitajoshua@gmail.com>	2024-09-25 16:09:09 -0400
committer	Joshua Bakita <bakitajoshua@gmail.com>	2024-09-25 16:09:09 -0400
commit		f347fde22f1297e4f022600d201780d5ead78114 (patch)
tree		76be305d6187003a1e0486ff6e91efb1062ae118 /include/os/linux/nvidia_p2p.c
parent		8340d234d78a7d0f46c11a584de538148b78b7cb (diff)
Delete no-longer-needed nvgpu headers (HEAD, master, jbakita-wip)
The dependency on these was removed in commit 8340d234.
Diffstat (limited to 'include/os/linux/nvidia_p2p.c')
-rw-r--r--	include/os/linux/nvidia_p2p.c	299
1 file changed, 0 insertions(+), 299 deletions(-)
diff --git a/include/os/linux/nvidia_p2p.c b/include/os/linux/nvidia_p2p.c
deleted file mode 100644
index 87db8c5..0000000
--- a/include/os/linux/nvidia_p2p.c
+++ /dev/null
@@ -1,299 +0,0 @@
/*
 * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/nv-p2p.h>

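/*
 * MMU notifier ->release callback: the owning mm is being torn down, so
 * tell the client that its pinned pages are about to disappear.
 */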
static void nvidia_p2p_mn_release(struct mmu_notifier *mn,
                struct mm_struct *mm)
{
        struct nvidia_p2p_page_table *page_table = container_of(mn,
                        struct nvidia_p2p_page_table,
                        mn);

        page_table->free_callback(page_table->data);
}

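/*
 * MMU notifier invalidation callback: if the invalidated range covers the
 * start of the pinned buffer, drop the notifier and fire the client's free
 * callback. Note that only the buffer's start address is tested against
 * [start, end]; 'size' is read but not used in the overlap check.
 */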
static void nvidia_p2p_mn_invl_range_start(struct mmu_notifier *mn,
                struct mm_struct *mm, unsigned long start, unsigned long end)
{
        struct nvidia_p2p_page_table *page_table = container_of(mn,
                        struct nvidia_p2p_page_table,
                        mn);
        u64 vaddr = 0;
        u64 size = 0;

        vaddr = page_table->vaddr;
        size = page_table->size;

        if (vaddr >= start && vaddr <= end) {
                mmu_notifier_unregister_no_release(&page_table->mn, page_table->mm);
                page_table->free_callback(page_table->data);
        }
}

static struct mmu_notifier_ops nvidia_p2p_mmu_ops = {
        .release = nvidia_p2p_mn_release,
        .invalidate_range_start = nvidia_p2p_mn_invl_range_start,
};

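/*
 * nvidia_p2p_get_pages() - pin a range of user memory for peer-to-peer DMA.
 * Pins the size bytes at vaddr via get_user_pages and registers an MMU
 * notifier so that free_callback(data) runs if the mapping goes away.
 */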
int nvidia_p2p_get_pages(u64 vaddr, u64 size,
                struct nvidia_p2p_page_table **page_table,
                void (*free_callback)(void *data), void *data)
{
        int ret = 0;
        int user_pages = 0;
        int locked = 0;
        int nr_pages = size >> PAGE_SHIFT;
        struct page **pages;

        if (nr_pages <= 0) {
                return -EINVAL;
        }

        *page_table = kzalloc(sizeof(**page_table), GFP_KERNEL);
        if (!*page_table) {
                return -ENOMEM;
        }

        pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                goto free_page_table;
        }
        down_read(&current->mm->mmap_sem);
        locked = 1;
        user_pages = get_user_pages_locked(vaddr & PAGE_MASK, nr_pages,
                        FOLL_WRITE | FOLL_FORCE,
                        pages, &locked);
        /* get_user_pages_locked() may drop mmap_sem and clear 'locked'. */
        if (locked)
                up_read(&current->mm->mmap_sem);
        if (user_pages != nr_pages) {
                ret = user_pages < 0 ? user_pages : -ENOMEM;
                goto free_pages;
        }

        (*page_table)->version = NVIDIA_P2P_PAGE_TABLE_VERSION;
        (*page_table)->pages = pages;
        (*page_table)->entries = user_pages;
        (*page_table)->page_size = NVIDIA_P2P_PAGE_SIZE_4KB;
        (*page_table)->size = size;

        (*page_table)->mn.ops = &nvidia_p2p_mmu_ops;
        (*page_table)->mm = current->mm;
        (*page_table)->free_callback = free_callback;
        (*page_table)->data = data;
        (*page_table)->vaddr = vaddr;
        mutex_init(&(*page_table)->lock);
        (*page_table)->mapped = NVIDIA_P2P_PINNED;

        ret = mmu_notifier_register(&(*page_table)->mn, (*page_table)->mm);
        if (ret) {
                goto free_pages;
        }

        return 0;
free_pages:
        while (--user_pages >= 0) {
                put_page(pages[user_pages]);
        }
        kfree(pages);
free_page_table:
        kfree(*page_table);
        *page_table = NULL;
        return ret;
}
EXPORT_SYMBOL(nvidia_p2p_get_pages);

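/*
 * nvidia_p2p_put_pages() - release a range pinned by nvidia_p2p_get_pages().
 * Unregistering the MMU notifier invokes the ->release callback, which in
 * turn runs the client's free_callback; the client is expected to free the
 * pages from there via nvidia_p2p_free_page_table().
 */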
int nvidia_p2p_put_pages(struct nvidia_p2p_page_table *page_table)
{
        if (!page_table) {
                return -EINVAL;
        }

        mmu_notifier_unregister(&page_table->mn, page_table->mm);

        return 0;
}
EXPORT_SYMBOL(nvidia_p2p_put_pages);

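/*
 * nvidia_p2p_free_page_table() - drop the page references and clear the
 * PINNED state. Warns if the pages are still DMA-mapped.
 */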
int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table)
{
        int user_pages = 0;
        struct page **pages = NULL;

        if (!page_table) {
                return 0;
        }

        mutex_lock(&page_table->lock);

        if (page_table->mapped & NVIDIA_P2P_MAPPED) {
                WARN(1, "Attempting to free pages that are still DMA-mapped");
        }

        if (page_table->mapped & NVIDIA_P2P_PINNED) {
                pages = page_table->pages;
                user_pages = page_table->entries;

                while (--user_pages >= 0) {
                        put_page(pages[user_pages]);
                }

                kfree(pages);
                page_table->mapped &= (u32)~NVIDIA_P2P_PINNED;
        }

        mutex_unlock(&page_table->lock);

        return 0;
}
EXPORT_SYMBOL(nvidia_p2p_free_page_table);

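/*
 * nvidia_p2p_dma_map_pages() - build a scatter-gather table over the pinned
 * pages and DMA-map it for dev. On success, per-segment bus addresses and
 * lengths are exposed through hw_address[] and hw_len[].
 */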
int nvidia_p2p_dma_map_pages(struct device *dev,
                struct nvidia_p2p_page_table *page_table,
                struct nvidia_p2p_dma_mapping **dma_mapping,
                enum dma_data_direction direction)
{
        struct sg_table *sgt = NULL;
        struct scatterlist *sg;
        struct page **pages = NULL;
        u32 nr_pages = 0;
        int ret = 0;
        int i, count;

        if (!page_table) {
                return -EINVAL;
        }

        mutex_lock(&page_table->lock);

        pages = page_table->pages;
        nr_pages = page_table->entries;
        if (nr_pages == 0) {
                mutex_unlock(&page_table->lock);
                return -EINVAL;
        }

        *dma_mapping = kzalloc(sizeof(**dma_mapping), GFP_KERNEL);
        if (!*dma_mapping) {
                mutex_unlock(&page_table->lock);
                return -ENOMEM;
        }
        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                ret = -ENOMEM;
                goto free_dma_mapping;
        }
        ret = sg_alloc_table_from_pages(sgt, pages,
                        nr_pages, 0, page_table->size, GFP_KERNEL);
        if (ret) {
                goto free_sgt;
        }

        (*dma_mapping)->version = NVIDIA_P2P_DMA_MAPPING_VERSION;
        (*dma_mapping)->sgt = sgt;
        (*dma_mapping)->dev = dev;
        (*dma_mapping)->direction = direction;
        (*dma_mapping)->page_table = page_table;

        count = dma_map_sg(dev, sgt->sgl, sgt->nents, direction);
        if (count < 1) {
                /* dma_map_sg() returns 0 on failure */
                ret = -ENOMEM;
                goto free_sg_table;
        }

        (*dma_mapping)->entries = count;

        (*dma_mapping)->hw_address = kcalloc(count, sizeof(u64), GFP_KERNEL);
        if (!((*dma_mapping)->hw_address)) {
                ret = -ENOMEM;
                goto unmap_sg;
        }
        (*dma_mapping)->hw_len = kcalloc(count, sizeof(u64), GFP_KERNEL);
        if (!((*dma_mapping)->hw_len)) {
                ret = -ENOMEM;
                goto free_hw_address;
        }

        for_each_sg(sgt->sgl, sg, count, i) {
                (*dma_mapping)->hw_address[i] = sg_dma_address(sg);
                (*dma_mapping)->hw_len[i] = sg_dma_len(sg);
        }
        (*dma_mapping)->page_table->mapped |= NVIDIA_P2P_MAPPED;
        mutex_unlock(&page_table->lock);

        return 0;
free_hw_address:
        kfree((*dma_mapping)->hw_address);
unmap_sg:
        dma_unmap_sg(dev, sgt->sgl,
                        sgt->nents, direction);
free_sg_table:
        sg_free_table(sgt);
free_sgt:
        kfree(sgt);
free_dma_mapping:
        kfree(*dma_mapping);
        *dma_mapping = NULL;
        mutex_unlock(&page_table->lock);

        return ret;
}
EXPORT_SYMBOL(nvidia_p2p_dma_map_pages);

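/*
 * nvidia_p2p_dma_unmap_pages() - tear down a mapping created by
 * nvidia_p2p_dma_map_pages(): unmap the scatter-gather table and free the
 * mapping bookkeeping.
 */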
int nvidia_p2p_dma_unmap_pages(struct nvidia_p2p_dma_mapping *dma_mapping)
{
        struct nvidia_p2p_page_table *page_table = NULL;

        if (!dma_mapping) {
                return -EINVAL;
        }

        page_table = dma_mapping->page_table;
        if (!page_table) {
                return -EFAULT;
        }

        mutex_lock(&page_table->lock);
        if (page_table->mapped & NVIDIA_P2P_MAPPED) {
                kfree(dma_mapping->hw_len);
                kfree(dma_mapping->hw_address);
                if (dma_mapping->entries)
                        dma_unmap_sg(dma_mapping->dev,
                                        dma_mapping->sgt->sgl,
                                        dma_mapping->sgt->nents,
                                        dma_mapping->direction);
                sg_free_table(dma_mapping->sgt);
                kfree(dma_mapping->sgt);
                kfree(dma_mapping);
                page_table->mapped &= (u32)~NVIDIA_P2P_MAPPED;
        }
        mutex_unlock(&page_table->lock);

        return 0;
}
EXPORT_SYMBOL(nvidia_p2p_dma_unmap_pages);

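/* Freeing a DMA mapping is identical to unmapping it. */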
int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping)
{
        return nvidia_p2p_dma_unmap_pages(dma_mapping);
}
EXPORT_SYMBOL(nvidia_p2p_free_dma_mapping);
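
For reference, a typical caller of this now-removed API pinned a user buffer, DMA-mapped it for a peer device, and tore everything down in reverse order. The sketch below is a minimal illustration under the same kernel era this file targeted (mmap_sem, get_user_pages_locked), not code from this repository: struct my_pin, my_free_cb, example_pin_and_map, and example_unpin are hypothetical names invented here.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/nv-p2p.h>

/* Hypothetical per-buffer state for one pinned-and-mapped region. */
struct my_pin {
        struct nvidia_p2p_page_table *pt;
        struct nvidia_p2p_dma_mapping *map;
};

/* Invoked via the MMU notifier if the user mapping disappears. */
static void my_free_cb(void *data)
{
        struct my_pin *pin = data;

        nvidia_p2p_free_dma_mapping(pin->map); /* -EINVAL if never mapped */
        nvidia_p2p_free_page_table(pin->pt);
}

static int example_pin_and_map(struct device *dev, u64 vaddr, u64 size,
                struct my_pin *pin)
{
        int ret;

        pin->pt = NULL;
        pin->map = NULL;

        ret = nvidia_p2p_get_pages(vaddr, size, &pin->pt, my_free_cb, pin);
        if (ret)
                return ret;

        ret = nvidia_p2p_dma_map_pages(dev, pin->pt, &pin->map,
                        DMA_BIDIRECTIONAL);
        if (ret) {
                nvidia_p2p_put_pages(pin->pt); /* ->release runs my_free_cb() */
                return ret;
        }

        /* Program pin->map->hw_address[i] / hw_len[i] into the device. */
        return 0;
}

static void example_unpin(struct my_pin *pin)
{
        nvidia_p2p_dma_unmap_pages(pin->map);
        pin->map = NULL; /* already freed; don't free again in my_free_cb() */
        nvidia_p2p_put_pages(pin->pt); /* ->release runs my_free_cb() */
}

Note the ordering on teardown: the DMA mapping must go before the pages, since nvidia_p2p_free_page_table() warns when pages are still mapped, and nvidia_p2p_put_pages() hands the final free off to the notifier callback.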