diff options
Diffstat (limited to 'drivers/gpu/nvgpu/common/posix/dma.c')
-rw-r--r-- | drivers/gpu/nvgpu/common/posix/dma.c | 228 |
1 file changed, 0 insertions, 228 deletions
diff --git a/drivers/gpu/nvgpu/common/posix/dma.c b/drivers/gpu/nvgpu/common/posix/dma.c deleted file mode 100644 index 95bb1a75..00000000 --- a/drivers/gpu/nvgpu/common/posix/dma.c +++ /dev/null | |||
@@ -1,228 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
20 | * DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include <stdlib.h> | ||
24 | |||
25 | #include <nvgpu/mm.h> | ||
26 | #include <nvgpu/vm.h> | ||
27 | #include <nvgpu/bug.h> | ||
28 | #include <nvgpu/dma.h> | ||
29 | #include <nvgpu/gmmu.h> | ||
30 | #include <nvgpu/nvgpu_mem.h> | ||
31 | #include <nvgpu/enabled.h> | ||
32 | |||
33 | /* | ||
34 | * In userspace vidmem vs sysmem is just a difference in what is placed in the | ||
35 | * aperture field. | ||
36 | */ | ||
37 | static int __nvgpu_do_dma_alloc(struct gk20a *g, unsigned long flags, | ||
38 | size_t size, struct nvgpu_mem *mem, | ||
39 | enum nvgpu_aperture ap) | ||
40 | { | ||
41 | void *memory = malloc(mem->aligned_size); | ||
42 | |||
43 | if (memory == NULL) | ||
44 | return -ENOMEM; | ||
45 | |||
46 | mem->cpu_va = memory; | ||
47 | mem->aperture = ap; | ||
48 | mem->size = size; | ||
49 | mem->aligned_size = PAGE_ALIGN(size); | ||
50 | mem->gpu_va = 0ULL; | ||
51 | mem->skip_wmb = true; | ||
52 | mem->vidmem_alloc = NULL; | ||
53 | mem->allocator = NULL; | ||
54 | |||
55 | return 0; | ||
56 | } | ||
57 | |||
/*
 * Userspace has no IOMMU, so the GPU is never considered IOMMU-able
 * regardless of which @g is asked about.
 */
bool nvgpu_iommuable(struct gk20a *g)
{
	(void)g;

	return false;
}
62 | |||
/*
 * Allocate @size bytes of DMA memory with no special allocation flags.
 * Thin convenience wrapper over nvgpu_dma_alloc_flags().
 */
int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
	const unsigned long no_flags = 0UL;

	return nvgpu_dma_alloc_flags(g, no_flags, size, mem);
}
67 | |||
68 | int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size, | ||
69 | struct nvgpu_mem *mem) | ||
70 | { | ||
71 | if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) { | ||
72 | /* | ||
73 | * First try vidmem. Obviously in userspace there's no such | ||
74 | * thing as vidmem per se but we will mark the aperture as | ||
75 | * vidmem. | ||
76 | */ | ||
77 | int err = nvgpu_dma_alloc_flags_vid(g, 0, size, mem); | ||
78 | |||
79 | if (!err) | ||
80 | return 0; | ||
81 | /* | ||
82 | * Fall back to sysmem (which may then also fail) in case | ||
83 | * vidmem is exhausted. | ||
84 | */ | ||
85 | } | ||
86 | |||
87 | return nvgpu_dma_alloc_flags_sys(g, flags, size, mem); | ||
88 | |||
89 | } | ||
90 | |||
/*
 * Allocate @size bytes of sysmem-tagged DMA memory with default flags.
 */
int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
	const unsigned long no_flags = 0UL;

	return nvgpu_dma_alloc_flags_sys(g, no_flags, size, mem);
}
95 | |||
96 | int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags, | ||
97 | size_t size, struct nvgpu_mem *mem) | ||
98 | { | ||
99 | return __nvgpu_do_dma_alloc(g, flags, size, mem, APERTURE_SYSMEM); | ||
100 | } | ||
101 | |||
/*
 * Allocate @size bytes of vidmem-tagged DMA memory with default flags.
 */
int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
	const unsigned long no_flags = 0UL;

	return nvgpu_dma_alloc_flags_vid(g, no_flags, size, mem);
}
106 | |||
107 | int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags, | ||
108 | size_t size, struct nvgpu_mem *mem) | ||
109 | { | ||
110 | return __nvgpu_do_dma_alloc(g, flags, size, mem, APERTURE_VIDMEM); | ||
111 | } | ||
112 | |||
113 | int nvgpu_dma_alloc_vid_at(struct gk20a *g, | ||
114 | size_t size, struct nvgpu_mem *mem, u64 at) | ||
115 | { | ||
116 | BUG(); | ||
117 | |||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags, | ||
122 | size_t size, struct nvgpu_mem *mem, u64 at) | ||
123 | { | ||
124 | BUG(); | ||
125 | |||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem) | ||
130 | { | ||
131 | if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY)) | ||
132 | free(mem->cpu_va); | ||
133 | |||
134 | memset(mem, 0, sizeof(*mem)); | ||
135 | } | ||
136 | |||
/*
 * Allocate and GMMU-map @size bytes into @vm with default flags.
 */
int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
		struct nvgpu_mem *mem)
{
	const unsigned long no_flags = 0UL;

	return nvgpu_dma_alloc_map_flags(vm, no_flags, size, mem);
}
142 | |||
143 | int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags, | ||
144 | size_t size, struct nvgpu_mem *mem) | ||
145 | { | ||
146 | if (!nvgpu_is_enabled(gk20a_from_vm(vm), NVGPU_MM_UNIFIED_MEMORY)) { | ||
147 | int err = nvgpu_dma_alloc_map_flags_vid(vm, | ||
148 | flags | NVGPU_DMA_NO_KERNEL_MAPPING, | ||
149 | size, mem); | ||
150 | |||
151 | if (!err) | ||
152 | return 0; | ||
153 | /* | ||
154 | * Fall back to sysmem (which may then also fail) in case | ||
155 | * vidmem is exhausted. | ||
156 | */ | ||
157 | } | ||
158 | |||
159 | return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem); | ||
160 | } | ||
161 | |||
/*
 * Allocate and GMMU-map sysmem-tagged DMA memory with default flags.
 */
int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
		struct nvgpu_mem *mem)
{
	const unsigned long no_flags = 0UL;

	return nvgpu_dma_alloc_map_flags_sys(vm, no_flags, size, mem);
}
167 | |||
168 | int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags, | ||
169 | size_t size, struct nvgpu_mem *mem) | ||
170 | { | ||
171 | int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem); | ||
172 | |||
173 | if (err) | ||
174 | return err; | ||
175 | |||
176 | mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0, | ||
177 | gk20a_mem_flag_none, false, | ||
178 | mem->aperture); | ||
179 | if (!mem->gpu_va) { | ||
180 | err = -ENOMEM; | ||
181 | goto fail_free; | ||
182 | } | ||
183 | |||
184 | return 0; | ||
185 | |||
186 | fail_free: | ||
187 | nvgpu_dma_free(vm->mm->g, mem); | ||
188 | return err; | ||
189 | } | ||
190 | |||
191 | int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size, | ||
192 | struct nvgpu_mem *mem) | ||
193 | { | ||
194 | return nvgpu_dma_alloc_map_flags_vid(vm, | ||
195 | NVGPU_DMA_NO_KERNEL_MAPPING, size, mem); | ||
196 | } | ||
197 | |||
198 | int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags, | ||
199 | size_t size, struct nvgpu_mem *mem) | ||
200 | { | ||
201 | int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem); | ||
202 | |||
203 | if (err) | ||
204 | return err; | ||
205 | |||
206 | mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0, | ||
207 | gk20a_mem_flag_none, false, | ||
208 | mem->aperture); | ||
209 | if (!mem->gpu_va) { | ||
210 | err = -ENOMEM; | ||
211 | goto fail_free; | ||
212 | } | ||
213 | |||
214 | return 0; | ||
215 | |||
216 | fail_free: | ||
217 | nvgpu_dma_free(vm->mm->g, mem); | ||
218 | return err; | ||
219 | } | ||
220 | |||
221 | void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem) | ||
222 | { | ||
223 | if (mem->gpu_va) | ||
224 | nvgpu_gmmu_unmap(vm, mem, mem->gpu_va); | ||
225 | mem->gpu_va = 0; | ||
226 | |||
227 | nvgpu_dma_free(vm->mm->g, mem); | ||
228 | } | ||