summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--drivers/gpu/nvgpu/common/mm/nvgpu_mem.c109
-rw-r--r--drivers/gpu/nvgpu/common/posix/io.c5
-rw-r--r--drivers/gpu/nvgpu/common/posix/posix-nvgpu_mem.c59
-rw-r--r--drivers/gpu/nvgpu/common/pramin.c72
4 files changed, 185 insertions, 60 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
index 855d455d..9f3b6cfa 100644
--- a/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/mm/nvgpu_mem.c
@@ -177,3 +177,112 @@ u64 nvgpu_sgt_alignment(struct gk20a *g, struct nvgpu_sgt *sgt)
177 177
178 return align; 178 return align;
179} 179}
180
181u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
182{
183 u32 data = 0;
184
185 if (mem->aperture == APERTURE_SYSMEM) {
186 u32 *ptr = mem->cpu_va;
187
188 WARN_ON(!ptr);
189 data = ptr[w];
190 } else if (mem->aperture == APERTURE_VIDMEM) {
191 nvgpu_pramin_rd_n(g, mem, w * sizeof(u32), sizeof(u32), &data);
192 } else {
193 WARN_ON("Accessing unallocated nvgpu_mem");
194 }
195
196 return data;
197}
198
199u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
200{
201 WARN_ON(offset & 3);
202 return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
203}
204
205void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
206 u32 offset, void *dest, u32 size)
207{
208 WARN_ON(offset & 3);
209 WARN_ON(size & 3);
210
211 if (mem->aperture == APERTURE_SYSMEM) {
212 u8 *src = (u8 *)mem->cpu_va + offset;
213
214 WARN_ON(!mem->cpu_va);
215 memcpy(dest, src, size);
216 } else if (mem->aperture == APERTURE_VIDMEM) {
217 nvgpu_pramin_rd_n(g, mem, offset, size, dest);
218 } else {
219 WARN_ON("Accessing unallocated nvgpu_mem");
220 }
221}
222
223void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
224{
225 if (mem->aperture == APERTURE_SYSMEM) {
226 u32 *ptr = mem->cpu_va;
227
228 WARN_ON(!ptr);
229 ptr[w] = data;
230 } else if (mem->aperture == APERTURE_VIDMEM) {
231 nvgpu_pramin_wr_n(g, mem, w * sizeof(u32), sizeof(u32), &data);
232 if (!mem->skip_wmb)
233 nvgpu_wmb();
234 } else {
235 WARN_ON("Accessing unallocated nvgpu_mem");
236 }
237}
238
239void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
240{
241 WARN_ON(offset & 3);
242 nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
243}
244
245void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
246 void *src, u32 size)
247{
248 WARN_ON(offset & 3);
249 WARN_ON(size & 3);
250
251 if (mem->aperture == APERTURE_SYSMEM) {
252 u8 *dest = (u8 *)mem->cpu_va + offset;
253
254 WARN_ON(!mem->cpu_va);
255 memcpy(dest, src, size);
256 } else if (mem->aperture == APERTURE_VIDMEM) {
257 nvgpu_pramin_wr_n(g, mem, offset, size, src);
258 if (!mem->skip_wmb)
259 nvgpu_wmb();
260 } else {
261 WARN_ON("Accessing unallocated nvgpu_mem");
262 }
263}
264
265void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
266 u32 c, u32 size)
267{
268 WARN_ON(offset & 3);
269 WARN_ON(size & 3);
270 WARN_ON(c & ~0xff);
271
272 c &= 0xff;
273
274 if (mem->aperture == APERTURE_SYSMEM) {
275 u8 *dest = (u8 *)mem->cpu_va + offset;
276
277 WARN_ON(!mem->cpu_va);
278 memset(dest, c, size);
279 } else if (mem->aperture == APERTURE_VIDMEM) {
280 u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
281
282 nvgpu_pramin_memset(g, mem, offset, size, repeat_value);
283 if (!mem->skip_wmb)
284 nvgpu_wmb();
285 } else {
286 WARN_ON("Accessing unallocated nvgpu_mem");
287 }
288}
diff --git a/drivers/gpu/nvgpu/common/posix/io.c b/drivers/gpu/nvgpu/common/posix/io.c
index ce018940..dc32c20e 100644
--- a/drivers/gpu/nvgpu/common/posix/io.c
+++ b/drivers/gpu/nvgpu/common/posix/io.c
@@ -35,6 +35,11 @@ void nvgpu_writel(struct gk20a *g, u32 r, u32 v)
35 BUG(); 35 BUG();
36} 36}
37 37
/*
 * Relaxed (no-barrier) register write stub for the POSIX userspace
 * build. There is no real register I/O in userspace, so any call is a
 * programming error and traps hard via BUG().
 */
void nvgpu_writel_relaxed(struct gk20a *g, u32 r, u32 v)
{
	BUG();
}
42
38u32 nvgpu_readl(struct gk20a *g, u32 r) 43u32 nvgpu_readl(struct gk20a *g, u32 r)
39{ 44{
40 BUG(); 45 BUG();
diff --git a/drivers/gpu/nvgpu/common/posix/posix-nvgpu_mem.c b/drivers/gpu/nvgpu/common/posix/posix-nvgpu_mem.c
index 7f3bf9f1..fa92a7c6 100644
--- a/drivers/gpu/nvgpu/common/posix/posix-nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/posix/posix-nvgpu_mem.c
@@ -27,65 +27,6 @@
27#include <nvgpu/nvgpu_mem.h> 27#include <nvgpu/nvgpu_mem.h>
28 28
29/* 29/*
30 * DMA memory buffers - obviously we don't really have DMA in userspace but we
31 * can emulate a lot of the DMA mem functionality for unit testing purposes.
32 */
33
34u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
35{
36 u32 *mem_ptr = (u32 *)mem->cpu_va;
37
38 return mem_ptr[w];
39}
40
41u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
42{
43 if (offset & 0x3)
44 BUG();
45
46 return nvgpu_mem_rd32(g, mem, offset >> 2);
47}
48
49void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
50 void *dest, u32 size)
51{
52 if (offset & 0x3 || size & 0x3)
53 BUG();
54
55 memcpy(dest, ((char *)mem->cpu_va) + offset, size);
56}
57
58void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
59{
60 u32 *mem_ptr = (u32 *)mem->cpu_va;
61
62 mem_ptr[w] = data;
63}
64
65void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
66{
67 if (offset & 0x3)
68 BUG();
69
70 nvgpu_mem_wr32(g, mem, offset >> 2, data);
71}
72
73void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
74 void *src, u32 size)
75{
76 if (offset & 0x3 || size & 0x3)
77 BUG();
78
79 memcpy(((char *)mem->cpu_va) + offset, src, size);
80}
81
82void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
83 u32 c, u32 size)
84{
85 memset(((char *)mem->cpu_va) + offset, c, size);
86}
87
88/*
89 * These functions are somewhat meaningless. 30 * These functions are somewhat meaningless.
90 */ 31 */
91u64 nvgpu_mem_get_addr(struct gk20a *g, struct nvgpu_mem *mem) 32u64 nvgpu_mem_get_addr(struct gk20a *g, struct nvgpu_mem *mem)
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index 4c6a4a0d..99d588aa 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -28,11 +28,18 @@
28#include "gk20a/gk20a.h" 28#include "gk20a/gk20a.h"
29 29
30/* 30/*
31 * This typedef is for functions that get called during the access_batched()
32 * operation.
33 */
34typedef void (*pramin_access_batch_fn)(struct gk20a *g, u32 start, u32 words,
35 u32 **arg);
36
37/*
31 * The PRAMIN range is 1 MB, must change base addr if a buffer crosses that. 38 * The PRAMIN range is 1 MB, must change base addr if a buffer crosses that.
32 * This same loop is used for read/write/memset. Offset and size in bytes. 39 * This same loop is used for read/write/memset. Offset and size in bytes.
33 * One call to "loop" is done per range, with "arg" supplied. 40 * One call to "loop" is done per range, with "arg" supplied.
34 */ 41 */
35void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem, 42static void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
36 u32 offset, u32 size, pramin_access_batch_fn loop, u32 **arg) 43 u32 offset, u32 size, pramin_access_batch_fn loop, u32 **arg)
37{ 44{
38 struct nvgpu_page_alloc *alloc = NULL; 45 struct nvgpu_page_alloc *alloc = NULL;
@@ -87,6 +94,69 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
87 } 94 }
88} 95}
89 96
97static void nvgpu_pramin_access_batch_rd_n(struct gk20a *g,
98 u32 start, u32 words, u32 **arg)
99{
100 u32 r = start, *dest_u32 = *arg;
101
102 while (words--) {
103 *dest_u32++ = nvgpu_readl(g, r);
104 r += sizeof(u32);
105 }
106
107 *arg = dest_u32;
108}
109
110void nvgpu_pramin_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
111 u32 start, u32 size, void *dest)
112{
113 u32 *dest_u32 = dest;
114
115 return nvgpu_pramin_access_batched(g, mem, start, size,
116 nvgpu_pramin_access_batch_rd_n, &dest_u32);
117}
118
119static void nvgpu_pramin_access_batch_wr_n(struct gk20a *g,
120 u32 start, u32 words, u32 **arg)
121{
122 u32 r = start, *src_u32 = *arg;
123
124 while (words--) {
125 nvgpu_writel_relaxed(g, r, *src_u32++);
126 r += sizeof(u32);
127 }
128
129 *arg = src_u32;
130}
131
132void nvgpu_pramin_wr_n(struct gk20a *g, struct nvgpu_mem *mem,
133 u32 start, u32 size, void *src)
134{
135 u32 *src_u32 = src;
136
137 return nvgpu_pramin_access_batched(g, mem, start, size,
138 nvgpu_pramin_access_batch_wr_n, &src_u32);
139}
140
141static void nvgpu_pramin_access_batch_set(struct gk20a *g,
142 u32 start, u32 words, u32 **arg)
143{
144 u32 r = start, repeat = **arg;
145
146 while (words--) {
147 nvgpu_writel_relaxed(g, r, repeat);
148 r += sizeof(u32);
149 }
150}
151
152void nvgpu_pramin_memset(struct gk20a *g, struct nvgpu_mem *mem,
153 u32 start, u32 size, u32 w)
154{
155 u32 *p = &w;
156
157 return nvgpu_pramin_access_batched(g, mem, start, size,
158 nvgpu_pramin_access_batch_set, &p);
159}
90void nvgpu_init_pramin(struct mm_gk20a *mm) 160void nvgpu_init_pramin(struct mm_gk20a *mm)
91{ 161{
92 mm->pramin_window = 0; 162 mm->pramin_window = 0;