Diffstat (limited to 'drivers/gpu/nvgpu/common/linux')
-rw-r--r--	drivers/gpu/nvgpu/common/linux/channel.c	7 +------
-rw-r--r--	drivers/gpu/nvgpu/common/linux/debug_mm.c	4 ----
-rw-r--r--	drivers/gpu/nvgpu/common/linux/nvgpu_mem.c	24 ++++++++++++------------
3 files changed, 13 insertions, 22 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/channel.c b/drivers/gpu/nvgpu/common/linux/channel.c
index e2a10634..727b5067 100644
--- a/drivers/gpu/nvgpu/common/linux/channel.c
+++ b/drivers/gpu/nvgpu/common/linux/channel.c
@@ -692,10 +692,6 @@ static int gk20a_submit_append_gpfifo(struct channel_gk20a *c,
 	 * This path (from userspace to sysmem) is special in order to
 	 * avoid two copies unnecessarily (from user to pipe, then from
 	 * pipe to gpu sysmem buffer).
-	 *
-	 * As a special case, the pipe buffer exists if PRAMIN writes
-	 * are forced, although the buffers may not be in vidmem in
-	 * that case.
 	 */
 	if (end > gpfifo_size) {
 		/* wrap-around */
@@ -723,8 +719,7 @@ static int gk20a_submit_append_gpfifo(struct channel_gk20a *c,
 			0, num_entries);
 		goto out;
 	} else if (user_gpfifo) {
-		/* from userspace to vidmem or sysmem when pramin forced, use
-		 * the common copy path below */
+		/* from userspace to vidmem, use the common copy path below */
 		err = copy_from_user(c->gpfifo.pipe, user_gpfifo, len);
 		if (err)
 			return err;
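
After these two hunks, one distinction remains in gk20a_submit_append_gpfifo(): a user gpfifo headed for sysmem is copied straight into the ring (with the wrap-around split guarded by "if (end > gpfifo_size)"), while one headed for vidmem is staged through c->gpfifo.pipe into the common copy path. A minimal userspace model of that wrap-around split, with illustrative names (ring_copy, put, ring_entries) that are not the driver's:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Model of the gpfifo wrap-around copy: when a submission runs past
 * the end of the ring, the tail of the copy lands back at entry 0.
 * Assumes n <= ring_entries, as a gpfifo submission must fit the ring.
 */
static void ring_copy(uint32_t *ring, size_t ring_entries,
		      size_t put, const uint32_t *src, size_t n)
{
	size_t end = put + n;

	if (end > ring_entries) {
		/* wrap-around: split the copy at the end of the ring */
		size_t first = ring_entries - put;

		memcpy(ring + put, src, first * sizeof(*ring));
		memcpy(ring, src + first, (n - first) * sizeof(*ring));
	} else {
		memcpy(ring + put, src, n * sizeof(*ring));
	}
}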
diff --git a/drivers/gpu/nvgpu/common/linux/debug_mm.c b/drivers/gpu/nvgpu/common/linux/debug_mm.c
index f2c42e70..5a3510bf 100644
--- a/drivers/gpu/nvgpu/common/linux/debug_mm.c
+++ b/drivers/gpu/nvgpu/common/linux/debug_mm.c
@@ -19,8 +19,4 @@
 
 void gk20a_mm_debugfs_init(struct gk20a *g)
 {
-	struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g);
-
-	debugfs_create_bool("force_pramin", 0664, l->debugfs,
-			    &g->mm.force_pramin);
 }
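
For context on the deleted call: debugfs_create_bool() publishes a kernel bool as a read/write file under debugfs, which is how the force_pramin switch was toggled from userspace. A minimal standalone sketch of that pattern, with hypothetical names (example_flag, example_dir):

#include <linux/debugfs.h>
#include <linux/module.h>

static bool example_flag;
static struct dentry *example_dir;

static int __init example_init(void)
{
	/* expose a bool at <debugfs>/example/example_flag; mode 0664
	 * lets owner and group toggle it, as "force_pramin" allowed */
	example_dir = debugfs_create_dir("example", NULL);
	debugfs_create_bool("example_flag", 0664, example_dir,
			    &example_flag);
	return 0;
}

static void __exit example_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");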
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index 015295ba..93925803 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -54,7 +54,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 		PAGE_KERNEL :
 		pgprot_writecombine(PAGE_KERNEL);
 
-	if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
+	if (mem->aperture != APERTURE_SYSMEM)
 		return 0;
 
 	/*
@@ -91,7 +91,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 
 void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
 {
-	if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
+	if (mem->aperture != APERTURE_SYSMEM)
 		return;
 
 	/*
@@ -134,7 +134,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 {
 	u32 data = 0;
 
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+	if (mem->aperture == APERTURE_SYSMEM) {
 		u32 *ptr = mem->cpu_va;
 
 		WARN_ON(!ptr);
@@ -142,7 +142,7 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 #ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
 		nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
 #endif
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	} else if (mem->aperture == APERTURE_VIDMEM) {
 		u32 value;
 		u32 *p = &value;
 
@@ -170,7 +170,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 	WARN_ON(offset & 3);
 	WARN_ON(size & 3);
 
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *src = (u8 *)mem->cpu_va + offset;
 
 		WARN_ON(!mem->cpu_va);
@@ -180,7 +180,7 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 		nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
 			src, *dest, size);
 #endif
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	} else if (mem->aperture == APERTURE_VIDMEM) {
 		u32 *dest_u32 = dest;
 
 		nvgpu_pramin_access_batched(g, mem, offset, size,
@@ -210,7 +210,7 @@ static void pramin_access_batch_wr_n(struct gk20a *g, u32 start, u32 words, u32
 
 void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 {
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+	if (mem->aperture == APERTURE_SYSMEM) {
 		u32 *ptr = mem->cpu_va;
 
 		WARN_ON(!ptr);
@@ -218,7 +218,7 @@ void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 		nvgpu_log(g, gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
 #endif
 		ptr[w] = data;
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	} else if (mem->aperture == APERTURE_VIDMEM) {
 		u32 value = data;
 		u32 *p = &value;
 
@@ -243,7 +243,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 	WARN_ON(offset & 3);
 	WARN_ON(size & 3);
 
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *dest = (u8 *)mem->cpu_va + offset;
 
 		WARN_ON(!mem->cpu_va);
@@ -253,7 +253,7 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 			dest, *src, size);
 #endif
 		memcpy(dest, src, size);
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	} else if (mem->aperture == APERTURE_VIDMEM) {
 		u32 *src_u32 = src;
 
 		nvgpu_pramin_access_batched(g, mem, offset, size,
@@ -290,7 +290,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 
 	c &= 0xff;
 
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
+	if (mem->aperture == APERTURE_SYSMEM) {
 		u8 *dest = (u8 *)mem->cpu_va + offset;
 
 		WARN_ON(!mem->cpu_va);
@@ -300,7 +300,7 @@ void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 			dest, c, size);
 #endif
 		memset(dest, c, size);
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
+	} else if (mem->aperture == APERTURE_VIDMEM) {
 		u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
 		u32 *p = &repeat_value;
 
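
All twelve nvgpu_mem.c hunks make the same substitution, so the resulting dispatch is easiest to read reassembled in one place: sysmem buffers are touched through the CPU mapping set up by nvgpu_mem_begin(), vidmem buffers go through the batched PRAMIN window. A sketch of nvgpu_mem_rd32() after the patch, pieced together from the hunks above; the read-side callback pramin_access_batch_rd_n and the exact nvgpu_pramin_access_batched() argument order are assumptions modeled on the pramin_access_batch_wr_n context visible above:

u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
{
	u32 data = 0;

	if (mem->aperture == APERTURE_SYSMEM) {
		/* sysmem: read through the kernel CPU mapping */
		u32 *ptr = mem->cpu_va;

		WARN_ON(!ptr);
		data = ptr[w];
	} else if (mem->aperture == APERTURE_VIDMEM) {
		/* vidmem: read one word through the PRAMIN aperture */
		u32 value;
		u32 *p = &value;

		nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
				sizeof(u32), pramin_access_batch_rd_n, &p);
		data = value;
	} else {
		WARN_ON(1);
	}

	return data;
}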