summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2018-08-14 14:30:48 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-09-05 23:38:42 -0400
commitb44c7fdb114a63ab98fffc0f246776b56399ff64 (patch)
treec523c2ea516aaed3b68271a77cf88ffa132e329d /drivers/gpu/nvgpu/common/mm
parentef851272e5201f343c9b287a9eacfc25d4912276 (diff)
gpu: nvgpu: Move common DMA code to common/mm
This migrates the common DMA code (os agnostic) to the common directory. This new unit will be the common DMA allocator that lets users allocate SYSMEM, VIDMEM, or either. Other units will be responsible for actually handling the mechanics of allocating VIDMEM or SYSMEM. Also update the names of the DMA related files so that tmake doesn't complain about duplicate C file names. To do this call the common DMA file dma.c and prepend the OS to the other DMA files. So now we have: common/mm/dma.c os/posix/posix-dma.c os/linux/linux-dma.c JIRA NVGPU-990 Change-Id: I22d2d41803ad89be7d9c28f87864ce4fedf10836 Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1799807 Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r--drivers/gpu/nvgpu/common/mm/dma.c197
1 files changed, 197 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/dma.c b/drivers/gpu/nvgpu/common/mm/dma.c
new file mode 100644
index 00000000..f7331f8e
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/mm/dma.c
@@ -0,0 +1,197 @@
1/*
2 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#include <nvgpu/mm.h>
24#include <nvgpu/vm.h>
25#include <nvgpu/dma.h>
26#include <nvgpu/gmmu.h>
27#include <nvgpu/enabled.h>
28#include <nvgpu/nvgpu_mem.h>
29
/*
 * Allocate DMA-able memory with default (zero) flags. Delegates to
 * nvgpu_dma_alloc_flags(), which picks vidmem or sysmem based on the
 * NVGPU_MM_UNIFIED_MEMORY setting.
 */
int nvgpu_dma_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
	unsigned long no_flags = 0UL;

	return nvgpu_dma_alloc_flags(g, no_flags, size, mem);
}
34
35int nvgpu_dma_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
36 struct nvgpu_mem *mem)
37{
38 if (!nvgpu_is_enabled(g, NVGPU_MM_UNIFIED_MEMORY)) {
39 /*
40 * Force the no-kernel-mapping flag on because we don't support
41 * the lack of it for vidmem - the user should not care when
42 * using nvgpu_gmmu_alloc_map and it's vidmem, or if there's a
43 * difference, the user should use the flag explicitly anyway.
44 *
45 * Incoming flags are ignored here, since bits other than the
46 * no-kernel-mapping flag are ignored by the vidmem mapping
47 * functions anyway.
48 */
49 int err = nvgpu_dma_alloc_flags_vid(g,
50 NVGPU_DMA_NO_KERNEL_MAPPING,
51 size, mem);
52
53 if (!err)
54 return 0;
55 /*
56 * Fall back to sysmem (which may then also fail) in case
57 * vidmem is exhausted.
58 */
59 }
60
61 return nvgpu_dma_alloc_flags_sys(g, flags, size, mem);
62}
63
/*
 * Allocate plain sysmem with default (zero) flags.
 */
int nvgpu_dma_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
{
	unsigned long no_flags = 0UL;

	return nvgpu_dma_alloc_flags_sys(g, no_flags, size, mem);
}
68
69int nvgpu_dma_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
70{
71 return nvgpu_dma_alloc_flags_vid(g,
72 NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
73}
74
75int nvgpu_dma_alloc_flags_vid(struct gk20a *g, unsigned long flags,
76 size_t size, struct nvgpu_mem *mem)
77{
78 return nvgpu_dma_alloc_flags_vid_at(g, flags, size, mem, 0);
79}
80
81int nvgpu_dma_alloc_vid_at(struct gk20a *g,
82 size_t size, struct nvgpu_mem *mem, u64 at)
83{
84 return nvgpu_dma_alloc_flags_vid_at(g,
85 NVGPU_DMA_NO_KERNEL_MAPPING, size, mem, at);
86}
87
/*
 * Allocate DMA-able memory and map it into @vm's GPU address space,
 * with default (zero) flags.
 */
int nvgpu_dma_alloc_map(struct vm_gk20a *vm, size_t size,
		struct nvgpu_mem *mem)
{
	unsigned long no_flags = 0UL;

	return nvgpu_dma_alloc_map_flags(vm, no_flags, size, mem);
}
93
94int nvgpu_dma_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
95 size_t size, struct nvgpu_mem *mem)
96{
97 if (!nvgpu_is_enabled(gk20a_from_vm(vm), NVGPU_MM_UNIFIED_MEMORY)) {
98 /*
99 * Force the no-kernel-mapping flag on because we don't support
100 * the lack of it for vidmem - the user should not care when
101 * using nvgpu_dma_alloc_map and it's vidmem, or if there's a
102 * difference, the user should use the flag explicitly anyway.
103 */
104 int err = nvgpu_dma_alloc_map_flags_vid(vm,
105 flags | NVGPU_DMA_NO_KERNEL_MAPPING,
106 size, mem);
107
108 if (!err)
109 return 0;
110 /*
111 * Fall back to sysmem (which may then also fail) in case
112 * vidmem is exhausted.
113 */
114 }
115
116 return nvgpu_dma_alloc_map_flags_sys(vm, flags, size, mem);
117}
118
/*
 * Allocate sysmem and map it into @vm's GPU address space, with default
 * (zero) flags.
 */
int nvgpu_dma_alloc_map_sys(struct vm_gk20a *vm, size_t size,
		struct nvgpu_mem *mem)
{
	unsigned long no_flags = 0UL;

	return nvgpu_dma_alloc_map_flags_sys(vm, no_flags, size, mem);
}
124
125int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
126 size_t size, struct nvgpu_mem *mem)
127{
128 int err = nvgpu_dma_alloc_flags_sys(vm->mm->g, flags, size, mem);
129
130 if (err)
131 return err;
132
133 mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0,
134 gk20a_mem_flag_none, false,
135 mem->aperture);
136 if (!mem->gpu_va) {
137 err = -ENOMEM;
138 goto fail_free;
139 }
140
141 return 0;
142
143fail_free:
144 nvgpu_dma_free(vm->mm->g, mem);
145 return err;
146}
147
148int nvgpu_dma_alloc_map_vid(struct vm_gk20a *vm, size_t size,
149 struct nvgpu_mem *mem)
150{
151 return nvgpu_dma_alloc_map_flags_vid(vm,
152 NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
153}
154
155int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
156 size_t size, struct nvgpu_mem *mem)
157{
158 int err = nvgpu_dma_alloc_flags_vid(vm->mm->g, flags, size, mem);
159
160 if (err)
161 return err;
162
163 mem->gpu_va = nvgpu_gmmu_map(vm, mem, size, 0,
164 gk20a_mem_flag_none, false,
165 mem->aperture);
166 if (!mem->gpu_va) {
167 err = -ENOMEM;
168 goto fail_free;
169 }
170
171 return 0;
172
173fail_free:
174 nvgpu_dma_free(vm->mm->g, mem);
175 return err;
176}
177
178void nvgpu_dma_free(struct gk20a *g, struct nvgpu_mem *mem)
179{
180 switch (mem->aperture) {
181 case APERTURE_SYSMEM:
182 return nvgpu_dma_free_sys(g, mem);
183 case APERTURE_VIDMEM:
184 return nvgpu_dma_free_vid(g, mem);
185 default:
186 break; /* like free() on "null" memory */
187 }
188}
189
190void nvgpu_dma_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
191{
192 if (mem->gpu_va)
193 nvgpu_gmmu_unmap(vm, mem, mem->gpu_va);
194 mem->gpu_va = 0;
195
196 nvgpu_dma_free(vm->mm->g, mem);
197}