Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm_area.c')
-rw-r--r--   drivers/gpu/nvgpu/common/mm/vm_area.c   231
1 file changed, 231 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
new file mode 100644
index 00000000..b6286c43
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -0,0 +1,231 @@
/*
 * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <uapi/linux/nvgpu.h>

#include <nvgpu/vm.h>
#include <nvgpu/vm_area.h>

#include "gk20a/gk20a.h"
#include "gk20a/mm_gk20a.h"

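/*
 * Look up the VM area (address space reservation) that contains @addr.
 * Walks the VM's vm_area_list and returns the matching entry, or NULL if
 * @addr does not fall inside any reservation.
 */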
struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr)
{
	struct nvgpu_vm_area *vm_area;

	nvgpu_list_for_each_entry(vm_area, &vm->vm_area_list,
				  nvgpu_vm_area, vm_area_list) {
		if (addr >= vm_area->addr &&
		    addr < (u64)vm_area->addr + (u64)vm_area->size)
			return vm_area;
	}

	return NULL;
}

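/*
 * Validate a fixed-offset mapping request against the VM area covering
 * @map_addr: the offset must be aligned to the buffer page size, the
 * mapping must fit inside the reservation (when one exists), and it must
 * not overlap an already mapped buffer. On success the covering area
 * (possibly NULL for userspace-managed address spaces) is returned
 * through @pvm_area.
 */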
int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
				  u64 map_addr, u64 map_size, int pgsz_idx,
				  struct nvgpu_vm_area **pvm_area)
{
	struct gk20a *g = vm->mm->g;
	struct nvgpu_vm_area *vm_area;
	struct nvgpu_mapped_buf *buffer;
	u64 map_end = map_addr + map_size;

	/* can wrap around with insane map_size; zero is disallowed too */
	if (map_end <= map_addr) {
		nvgpu_warn(g, "fixed offset mapping with invalid map_size");
		return -EINVAL;
	}

	if (map_addr & (vm->gmmu_page_sizes[pgsz_idx] - 1)) {
		nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx",
			  map_addr);
		return -EINVAL;
	}

	/* Find the space reservation, but it's ok to have none for
	 * userspace-managed address spaces */
	vm_area = nvgpu_vm_area_find(vm, map_addr);
	if (!vm_area && !vm->userspace_managed) {
		nvgpu_warn(g, "fixed offset mapping without space allocation");
		return -EINVAL;
	}

	/* Mapped area should fit inside va, if there's one */
	if (vm_area && map_end > vm_area->addr + vm_area->size) {
		nvgpu_warn(g, "fixed offset mapping size overflows va node");
		return -EINVAL;
	}

	/* check that this mapping does not collide with existing
	 * mappings by checking the buffer with the highest GPU VA
	 * that is less than our buffer end */
	buffer = __nvgpu_vm_find_mapped_buf_less_than(
		vm, map_addr + map_size);
	if (buffer && buffer->addr + buffer->size > map_addr) {
		nvgpu_warn(g, "overlapping buffer map requested");
		return -EINVAL;
	}

	*pvm_area = vm_area;

	return 0;
}

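/*
 * Reserve a GPU virtual address range of @pages pages of @page_size bytes
 * and track it as a VM area. With NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET
 * the range is carved out at *addr; otherwise the allocator picks the
 * address and returns it through @addr.
 */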
int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
			u64 *addr, u32 flags)
{
	struct gk20a *g = vm->mm->g;
	struct nvgpu_allocator *vma;
	struct nvgpu_vm_area *vm_area;
	u64 vaddr_start = 0;
	int pgsz_idx = gmmu_page_size_small;

	nvgpu_log(g, gpu_dbg_map,
		  "ADD vm_area: pgsz=%#-8x pages=%-9u addr=%#-14llx flags=0x%x",
		  page_size, pages, *addr, flags);

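	/*
	 * Translate the requested page size into a GMMU page size index;
	 * only sizes this VM actually supports (and big pages only when
	 * they are enabled) are accepted.
	 */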
	for (; pgsz_idx < gmmu_nr_page_sizes; pgsz_idx++) {
		if (vm->gmmu_page_sizes[pgsz_idx] == page_size)
			break;
	}

	if (pgsz_idx > gmmu_page_size_big)
		return -EINVAL;

	if (!vm->big_pages && pgsz_idx == gmmu_page_size_big)
		return -EINVAL;

	vm_area = nvgpu_kzalloc(g, sizeof(*vm_area));
	if (!vm_area)
		goto clean_up_err;

	vma = vm->vma[pgsz_idx];
	if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
		vaddr_start = nvgpu_alloc_fixed(vma, *addr,
						(u64)pages *
						(u64)page_size,
						page_size);
	else
		vaddr_start = nvgpu_alloc(vma,
					  (u64)pages *
					  (u64)page_size);

	if (!vaddr_start)
		goto clean_up_err;

	vm_area->flags = flags;
	vm_area->addr = vaddr_start;
	vm_area->size = (u64)page_size * (u64)pages;
	vm_area->pgsz_idx = pgsz_idx;
	nvgpu_init_list_node(&vm_area->buffer_list_head);
	nvgpu_init_list_node(&vm_area->vm_area_list);

	nvgpu_mutex_acquire(&vm->update_gmmu_lock);

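	/*
	 * A sparse reservation is mapped up front with no physical backing
	 * (NULL sgt), so the range behaves as a sparse mapping until real
	 * buffers are mapped into it.
	 */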
	if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE) {
		u64 map_addr = g->ops.mm.gmmu_map(vm, vaddr_start,
						  NULL,
						  0,
						  vm_area->size,
						  pgsz_idx,
						  0,
						  0,
						  flags,
						  gk20a_mem_flag_none,
						  false,
						  true,
						  false,
						  NULL,
						  APERTURE_INVALID);
		if (!map_addr) {
			nvgpu_mutex_release(&vm->update_gmmu_lock);
			goto clean_up_err;
		}

		vm_area->sparse = true;
	}
	nvgpu_list_add_tail(&vm_area->vm_area_list, &vm->vm_area_list);

	nvgpu_mutex_release(&vm->update_gmmu_lock);

	*addr = vaddr_start;
	return 0;

clean_up_err:
	if (vaddr_start)
		nvgpu_free(vma, vaddr_start);
	if (vm_area)
		nvgpu_kfree(g, vm_area);
	return -ENOMEM;
}

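/*
 * Tear down the VM area reserved at @addr: drop the references held on any
 * buffers still mapped into it, unmap the sparse backing if there is one,
 * and release the virtual address range back to the allocator.
 */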
int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr)
{
	struct gk20a *g = gk20a_from_vm(vm);
	struct nvgpu_mapped_buf *buffer, *n;
	struct nvgpu_vm_area *vm_area;

	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
	vm_area = nvgpu_vm_area_find(vm, addr);
	if (!vm_area) {
		nvgpu_mutex_release(&vm->update_gmmu_lock);
		return 0;
	}
	nvgpu_list_del(&vm_area->vm_area_list);
	nvgpu_mutex_release(&vm->update_gmmu_lock);

	nvgpu_log(g, gpu_dbg_map,
		  "DEL vm_area: pgsz=%#-8x pages=%-9llu "
		  "addr=%#-14llx flags=0x%x",
		  vm->gmmu_page_sizes[vm_area->pgsz_idx],
		  vm_area->size / vm->gmmu_page_sizes[vm_area->pgsz_idx],
		  vm_area->addr,
		  vm_area->flags);

	/* Decrement the ref count on all buffers in this vm_area. This
	 * allows userspace to let the kernel free mappings that are
	 * only used by this vm_area. */
	nvgpu_list_for_each_entry_safe(buffer, n,
				       &vm_area->buffer_list_head,
				       nvgpu_mapped_buf, buffer_list) {
		nvgpu_list_del(&buffer->buffer_list);
		nvgpu_ref_put(&buffer->ref, __nvgpu_vm_unmap_ref);
	}

	/* if this was a sparse mapping, free the va */
	if (vm_area->sparse)
		g->ops.mm.gmmu_unmap(vm,
				     vm_area->addr,
				     vm_area->size,
				     vm_area->pgsz_idx,
				     true,
				     gk20a_mem_flag_none,
				     true,
				     NULL);

	nvgpu_free(vm->vma[vm_area->pgsz_idx], vm_area->addr);
	nvgpu_kfree(g, vm_area);

	return 0;
}