Diffstat (limited to 'drivers/gpu/nvgpu/os')
-rw-r--r--  drivers/gpu/nvgpu/os/linux/io.c          12
-rw-r--r--  drivers/gpu/nvgpu/os/linux/nvgpu_mem.c  201
2 files changed, 12 insertions, 201 deletions
diff --git a/drivers/gpu/nvgpu/os/linux/io.c b/drivers/gpu/nvgpu/os/linux/io.c
index c06512a5..9a0e29d7 100644
--- a/drivers/gpu/nvgpu/os/linux/io.c
+++ b/drivers/gpu/nvgpu/os/linux/io.c
@@ -31,6 +31,18 @@ void nvgpu_writel(struct gk20a *g, u32 r, u32 v)
 	}
 }
 
+void nvgpu_writel_relaxed(struct gk20a *g, u32 r, u32 v)
+{
+	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
+
+	if (unlikely(!l->regs)) {
+		__gk20a_warn_on_no_regs();
+		nvgpu_log(g, gpu_dbg_reg, "r=0x%x v=0x%x (failed)", r, v);
+	} else {
+		writel_relaxed(v, l->regs + r);
+	}
+}
+
 u32 nvgpu_readl(struct gk20a *g, u32 r)
 {
 	u32 v = __nvgpu_readl(g, r);
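The new nvgpu_writel_relaxed() mirrors nvgpu_writel() but maps to writel_relaxed(), so it skips the per-write memory barrier that writel() implies. As a minimal sketch of how a caller might batch writes with it, following the same writel_relaxed()-then-wmb() pattern the removed PRAMIN helpers below rely on (program_regs_sketch, reg_base, vals, and num are hypothetical names, not part of this commit):

static void program_regs_sketch(struct gk20a *g, u32 reg_base,
				const u32 *vals, u32 num)
{
	u32 i;

	/* Posted, barrier-free writes; MMIO to one device stays ordered. */
	for (i = 0U; i < num; i++)
		nvgpu_writel_relaxed(g, reg_base + i * (u32)sizeof(u32),
				vals[i]);

	/* One barrier publishes the whole batch, as the VIDMEM writers do. */
	wmb();
}

The single wmb() at the end amortizes the ordering cost over the whole batch, the same trade nvgpu_mem_wr32() and nvgpu_memset() below make behind mem->skip_wmb.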
diff --git a/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c
index 04b2afa7..aa8fcd84 100644
--- a/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c
@@ -48,207 +48,6 @@ static u64 __nvgpu_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl)
 	return ipa;
 }
 
-static void pramin_access_batch_rd_n(struct gk20a *g, u32 start, u32 words, u32 **arg)
-{
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-	u32 r = start, *dest_u32 = *arg;
-
-	if (!l->regs) {
-		__gk20a_warn_on_no_regs();
-		return;
-	}
-
-	while (words--) {
-		*dest_u32++ = gk20a_readl(g, r);
-		r += sizeof(u32);
-	}
-
-	*arg = dest_u32;
-}
-
-u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
-{
-	u32 data = 0;
-
-	if (mem->aperture == APERTURE_SYSMEM) {
-		u32 *ptr = mem->cpu_va;
-
-		WARN_ON(!ptr);
-		data = ptr[w];
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
-#endif
-	} else if (mem->aperture == APERTURE_VIDMEM) {
-		u32 value;
-		u32 *p = &value;
-
-		nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
-				sizeof(u32), pramin_access_batch_rd_n, &p);
-
-		data = value;
-
-	} else {
-		WARN_ON("Accessing unallocated nvgpu_mem");
-	}
-
-	return data;
-}
-
-u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
-{
-	WARN_ON(offset & 3);
-	return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
-}
-
-void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
-		u32 offset, void *dest, u32 size)
-{
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-
-	if (mem->aperture == APERTURE_SYSMEM) {
-		u8 *src = (u8 *)mem->cpu_va + offset;
-
-		WARN_ON(!mem->cpu_va);
-		memcpy(dest, src, size);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		if (size)
-			nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
-				src, *dest, size);
-#endif
-	} else if (mem->aperture == APERTURE_VIDMEM) {
-		u32 *dest_u32 = dest;
-
-		nvgpu_pramin_access_batched(g, mem, offset, size,
-				pramin_access_batch_rd_n, &dest_u32);
-	} else {
-		WARN_ON("Accessing unallocated nvgpu_mem");
-	}
-}
-
-static void pramin_access_batch_wr_n(struct gk20a *g, u32 start, u32 words, u32 **arg)
-{
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-	u32 r = start, *src_u32 = *arg;
-
-	if (!l->regs) {
-		__gk20a_warn_on_no_regs();
-		return;
-	}
-
-	while (words--) {
-		writel_relaxed(*src_u32++, l->regs + r);
-		r += sizeof(u32);
-	}
-
-	*arg = src_u32;
-}
-
-void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
-{
-	if (mem->aperture == APERTURE_SYSMEM) {
-		u32 *ptr = mem->cpu_va;
-
-		WARN_ON(!ptr);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
-#endif
-		ptr[w] = data;
-	} else if (mem->aperture == APERTURE_VIDMEM) {
-		u32 value = data;
-		u32 *p = &value;
-
-		nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
-				sizeof(u32), pramin_access_batch_wr_n, &p);
-		if (!mem->skip_wmb)
-			wmb();
-	} else {
-		WARN_ON("Accessing unallocated nvgpu_mem");
-	}
-}
-
-void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
-{
-	WARN_ON(offset & 3);
-	nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
-}
-
-void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
-		void *src, u32 size)
-{
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-
-	if (mem->aperture == APERTURE_SYSMEM) {
-		u8 *dest = (u8 *)mem->cpu_va + offset;
-
-		WARN_ON(!mem->cpu_va);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		if (size)
-			nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
-				dest, *src, size);
-#endif
-		memcpy(dest, src, size);
-	} else if (mem->aperture == APERTURE_VIDMEM) {
-		u32 *src_u32 = src;
-
-		nvgpu_pramin_access_batched(g, mem, offset, size,
-				pramin_access_batch_wr_n, &src_u32);
-		if (!mem->skip_wmb)
-			wmb();
-	} else {
-		WARN_ON("Accessing unallocated nvgpu_mem");
-	}
-}
-
-static void pramin_access_batch_set(struct gk20a *g, u32 start, u32 words, u32 **arg)
-{
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-	u32 r = start, repeat = **arg;
-
-	if (!l->regs) {
-		__gk20a_warn_on_no_regs();
-		return;
-	}
-
-	while (words--) {
-		writel_relaxed(repeat, l->regs + r);
-		r += sizeof(u32);
-	}
-}
-
-void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
-		u32 c, u32 size)
-{
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-	WARN_ON(c & ~0xff);
-
-	c &= 0xff;
-
-	if (mem->aperture == APERTURE_SYSMEM) {
-		u8 *dest = (u8 *)mem->cpu_va + offset;
-
-		WARN_ON(!mem->cpu_va);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		if (size)
-			nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x [times %d]",
-				dest, c, size);
-#endif
-		memset(dest, c, size);
-	} else if (mem->aperture == APERTURE_VIDMEM) {
-		u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
-		u32 *p = &repeat_value;
-
-		nvgpu_pramin_access_batched(g, mem, offset, size,
-				pramin_access_batch_set, &p);
-		if (!mem->skip_wmb)
-			wmb();
-	} else {
-		WARN_ON("Accessing unallocated nvgpu_mem");
-	}
-}
-
 /*
  * Obtain a SYSMEM address from a Linux SGL. This should eventually go away
  * and/or become private to this file once all bad usages of Linux SGLs are