Diffstat (limited to 'drivers/gpu/nvgpu/common/posix/dma.c')
-rw-r--r--  drivers/gpu/nvgpu/common/posix/dma.c | 220
1 file changed, 220 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/posix/dma.c b/drivers/gpu/nvgpu/common/posix/dma.c
new file mode 100644
index 00000000..23f59501
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/posix/dma.c
@@ -0,0 +1,220 @@
/*
 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <stdlib.h>

#include <nvgpu/mm.h>
#include <nvgpu/vm.h>
#include <nvgpu/bug.h>
#include <nvgpu/dma.h>
#include <nvgpu/gmmu.h>
#include <nvgpu/nvgpu_mem.h>
#include <nvgpu/enabled.h>

/*
 * In userspace, vidmem vs. sysmem is just a difference in what is placed in
 * the aperture field.
 */
static int __nvgpu_do_dma_alloc(struct gk20a *g, unsigned long flags,
                                size_t size, struct nvgpu_mem *mem,
                                enum nvgpu_aperture ap)
{
        /*
         * Compute the aligned size up front; the aligned_size field of the
         * incoming nvgpu_mem is not populated yet at this point.
         */
        size_t aligned_size = PAGE_ALIGN(size);
        void *memory = malloc(aligned_size);

        if (memory == NULL)
                return -ENOMEM;

        mem->cpu_va = memory;
        mem->aperture = ap;
        mem->size = size;
        mem->aligned_size = aligned_size;
        mem->gpu_va = 0ULL;
        mem->skip_wmb = true;
        mem->vidmem_alloc = NULL;
        mem->allocator = NULL;

        return 0;
}

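/*
 * There is no IOMMU in the userspace build, so the GPU is never reported
 * as IOMMU-able here.
 */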
bool nvgpu_iommuable(struct gk20a *g)
{
        return false;
}

int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
        return nvgpu_dma_alloc_flags(g, 0, size, mem);
}

int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
                          struct nvgpu_mem *mem)
{
        if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) {
                /*
                 * First try vidmem. In userspace there is no vidmem per se,
                 * but we mark the aperture as vidmem.
                 */
                int err = nvgpu_dma_alloc_flags_vid(g, 0, size, mem);

                if (!err)
                        return 0;
                /*
                 * Fall back to sysmem (which may then also fail) in case
                 * vidmem is exhausted.
                 */
        }

        return nvgpu_dma_alloc_flags_sys(g, flags, size, mem);
}

int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
        return nvgpu_dma_alloc_flags_sys(g, 0, size, mem);
}

int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
                              size_t size, struct nvgpu_mem *mem)
{
        return __nvgpu_do_dma_alloc(g, flags, size, mem, APERTURE_SYSMEM);
}

int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
        return nvgpu_dma_alloc_flags_vid(g, 0, size, mem);
}

int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags,
                              size_t size, struct nvgpu_mem *mem)
{
        return __nvgpu_do_dma_alloc(g, flags, size, mem, APERTURE_VIDMEM);
}

int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
                                 size_t size, struct nvgpu_mem *mem, u64 at)
{
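        /*
         * Fixed-address vidmem allocation is not supported in this
         * userspace stub; any caller reaching this path is a bug.
         */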
        BUG();

        return 0;
}

void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
{
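        /* A shadow copy does not own the buffer, so skip the free for it. */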
        if (!(mem->mem_flags & NVGPU_MEM_FLAG_SHADOW_COPY))
                free(mem->cpu_va);

        memset(mem, 0, sizeof(*mem));
}

int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
                        struct nvgpu_mem *mem)
{
        return nvgpu_dma_alloc_map_flags(vm, 0, size, mem);
}

int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
                              size_t size, struct nvgpu_mem *mem)
{
        if (!nvgpu_is_enabled(gk20a_from_vm(vm), NVGPU_MM_UNIFIED_MEMORY)) {
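                /*
                 * As in nvgpu_dma_alloc_flags(): try a vidmem-marked
                 * allocation first, then fall back to sysmem below.
                 */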
                int err = nvgpu_dma_alloc_map_flags_vid(vm,
                                flags | NVGPU_DMA_NO_KERNEL_MAPPING,
                                size, mem);

                if (!err)
                        return 0;
                /*
                 * Fall back to sysmem (which may then also fail) in case
                 * vidmem is exhausted.
                 */
        }

        return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
}

int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
                            struct nvgpu_mem *mem)
{
        return nvgpu_dma_alloc_map_flags_sys(vm, 0, size, mem);
}

int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
                                  size_t size, struct nvgpu_mem *mem)
{
        int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem);

        if (err)
                return err;

        mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0,
                                     gk20a_mem_flag_none, false,
                                     mem->aperture);
        if (!mem->gpu_va) {
                err = -ENOMEM;
                goto fail_free;
        }

        return 0;

fail_free:
        nvgpu_dma_free(vm->mm->g, mem);
        return err;
}

int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
                            struct nvgpu_mem *mem)
{
        return nvgpu_dma_alloc_map_flags_vid(vm,
                        NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
}

int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
                                  size_t size, struct nvgpu_mem *mem)
{
        int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem);

        if (err)
                return err;

        mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0,
                                     gk20a_mem_flag_none, false,
                                     mem->aperture);
        if (!mem->gpu_va) {
                err = -ENOMEM;
                goto fail_free;
        }

        return 0;

fail_free:
        nvgpu_dma_free(vm->mm->g, mem);
        return err;
}

void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
{
        if (mem->gpu_va)
                nvgpu_gmmu_unmap(vm, mem, mem->gpu_va);
        mem->gpu_va = 0;

        nvgpu_dma_free(vm->mm->g, mem);
}
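
For reference, here is a minimal sketch of how the sysmem path of these POSIX stubs might be exercised from a host-side test. The test_posix_dma_sysmem() helper, the zero-initialized nvgpu_mem, and the 4096-byte size are illustrative assumptions and are not part of this change; the gk20a pointer is assumed to come from the surrounding test setup, which the allocator shown above never dereferences.

/* Hypothetical test helper, not part of this patch. */
#include <string.h>

#include <nvgpu/dma.h>
#include <nvgpu/nvgpu_mem.h>

static int test_posix_dma_sysmem(struct gk20a *g)
{
        struct nvgpu_mem mem = { 0 };
        int err;

        /* Backed by malloc(); the aperture is marked as APERTURE_SYSMEM. */
        err = nvgpu_dma_alloc_sys(g, 4096, &mem);
        if (err != 0)
                return err;

        /* cpu_va is ordinary host memory and can be written directly. */
        memset(mem.cpu_va, 0, mem.aligned_size);

        /* Frees the backing buffer and clears the nvgpu_mem struct. */
        nvgpu_dma_free(g, &mem);

        return 0;
}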