Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/gmmu.c')
-rw-r--r-- drivers/gpu/nvgpu/common/mm/gmmu.c | 109
1 file changed, 109 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
new file mode 100644
index 00000000..a2ed3f3a
--- /dev/null
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <nvgpu/log.h>
+#include <nvgpu/gmmu.h>
+#include <nvgpu/nvgpu_mem.h>
+
+#include "gk20a/gk20a.h"
+#include "gk20a/mm_gk20a.h"
+
+/*
+ * Core GMMU map function for the kernel to use. If @addr is 0 then a
+ * GPU VA will be allocated for you. If @addr is non-zero then the buffer
+ * will be mapped at @addr.
+ */
+static u64 __nvgpu_gmmu_map(struct vm_gk20a *vm,
+                            struct nvgpu_mem *mem,
+                            u64 addr,
+                            u64 size,
+                            u32 flags,
+                            int rw_flag,
+                            bool priv,
+                            enum nvgpu_aperture aperture)
+{
+        struct gk20a *g = gk20a_from_vm(vm);
+        u64 vaddr;
+
+        struct sg_table *sgt = mem->priv.sgt;
+
+        nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+        vaddr = g->ops.mm.gmmu_map(vm, addr,
+                                   sgt,    /* sg table */
+                                   0,      /* sg offset */
+                                   size,
+                                   gmmu_page_size_kernel,
+                                   0,      /* kind */
+                                   0,      /* ctag_offset */
+                                   flags, rw_flag,
+                                   false,  /* clear_ctags */
+                                   false,  /* sparse */
+                                   priv,   /* priv */
+                                   NULL,   /* mapping_batch handle */
+                                   aperture);
+        nvgpu_mutex_release(&vm->update_gmmu_lock);
+        if (!vaddr) {
+                nvgpu_err(g, "failed to allocate va space");
+                return 0;
+        }
+
+        return vaddr;
+}
+
+u64 nvgpu_gmmu_map(struct vm_gk20a *vm,
+                   struct nvgpu_mem *mem,
+                   u64 size,
+                   u32 flags,
+                   int rw_flag,
+                   bool priv,
+                   enum nvgpu_aperture aperture)
+{
+        return __nvgpu_gmmu_map(vm, mem, 0, size, flags, rw_flag, priv,
+                                aperture);
+}
+
+/*
+ * Like nvgpu_gmmu_map(), except the buffer is mapped at the fixed @addr.
+ */
+u64 nvgpu_gmmu_map_fixed(struct vm_gk20a *vm,
+                         struct nvgpu_mem *mem,
+                         u64 addr,
+                         u64 size,
+                         u32 flags,
+                         int rw_flag,
+                         bool priv,
+                         enum nvgpu_aperture aperture)
+{
+        return __nvgpu_gmmu_map(vm, mem, addr, size, flags, rw_flag, priv,
+                                aperture);
+}
+
+void nvgpu_gmmu_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem, u64 gpu_va)
+{
+        struct gk20a *g = gk20a_from_vm(vm);
+
+        nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+        g->ops.mm.gmmu_unmap(vm,
+                             gpu_va,
+                             mem->size,
+                             gmmu_page_size_kernel,
+                             true,  /* va_allocated */
+                             gk20a_mem_flag_none,
+                             false, /* sparse */
+                             NULL); /* mapping_batch handle */
+
+        nvgpu_mutex_release(&vm->update_gmmu_lock);
+}
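
For context, a minimal caller sketch (not part of the patch itself) showing how the new API fits together. It assumes a valid struct vm_gk20a and an nvgpu_mem already backed by system memory; the function name is hypothetical, the zero flags value is illustrative, and APERTURE_SYSMEM is assumed from nvgpu's aperture enum.

/*
 * Hypothetical caller: map a sysmem buffer into a kernel-managed VM,
 * use the resulting GPU VA, then unmap it.
 */
static int example_map_and_unmap(struct vm_gk20a *vm, struct nvgpu_mem *mem)
{
        u64 gpu_va;

        /* addr == 0 internally, so a GPU VA is allocated for us. */
        gpu_va = nvgpu_gmmu_map(vm, mem, mem->size,
                                0,                   /* flags (illustrative) */
                                gk20a_mem_flag_none, /* rw_flag */
                                false,               /* priv */
                                APERTURE_SYSMEM);
        if (!gpu_va)
                return -ENOMEM;

        /* ... hand gpu_va to the GPU ... */

        nvgpu_gmmu_unmap(vm, mem, gpu_va);
        return 0;
}

When the mapping must land at a GPU VA the hardware or firmware already expects, nvgpu_gmmu_map_fixed() takes the same arguments plus that address in @addr.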