diff options
Diffstat (limited to 'drivers')
 drivers/gpu/nvgpu/common/posix/posix-nvgpu_mem.c | 10 ----------
 drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h      |  8 ++------
 drivers/gpu/nvgpu/os/linux/nvgpu_mem.c           | 65 -----------------
 3 files changed, 2 insertions(+), 81 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/posix/posix-nvgpu_mem.c b/drivers/gpu/nvgpu/common/posix/posix-nvgpu_mem.c
index f8feb6a2..7f3bf9f1 100644
--- a/drivers/gpu/nvgpu/common/posix/posix-nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/posix/posix-nvgpu_mem.c
@@ -31,16 +31,6 @@ | |||
31 | * can emulate a lot of the DMA mem functionality for unit testing purposes. | 31 | * can emulate a lot of the DMA mem functionality for unit testing purposes. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem) | ||
35 | { | ||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem) | ||
40 | { | ||
41 | return; | ||
42 | } | ||
43 | |||
44 | u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w) | 34 | u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w) |
45 | { | 35 | { |
46 | u32 *mem_ptr = (u32 *)mem->cpu_va; | 36 | u32 *mem_ptr = (u32 *)mem->cpu_va; |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
index 93fce81e..dba7c320 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/nvgpu_mem.h
@@ -319,14 +319,10 @@ int nvgpu_mem_create_from_mem(struct gk20a *g, | |||
319 | void __nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem); | 319 | void __nvgpu_mem_free_vidmem_alloc(struct gk20a *g, struct nvgpu_mem *vidmem); |
320 | 320 | ||
321 | /* | 321 | /* |
322 | * Buffer accessors - wrap between begin() and end() if there is no permanent | 322 | * Buffer accessors. Sysmem buffers always have a CPU mapping and vidmem |
323 | * kernel mapping for this buffer. | 323 | * buffers are accessed via PRAMIN. |
324 | */ | 324 | */ |
325 | 325 | ||
326 | int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem); | ||
327 | /* nop for null mem, like with free() or vunmap() */ | ||
328 | void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem); | ||
329 | |||
330 | /* word-indexed offset */ | 326 | /* word-indexed offset */ |
331 | u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w); | 327 | u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w); |
332 | /* byte offset (32b-aligned) */ | 328 | /* byte offset (32b-aligned) */ |
diff --git a/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c
index 763d6506..04b2afa7 100644
--- a/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/os/linux/nvgpu_mem.c
@@ -48,71 +48,6 @@ static u64 __nvgpu_sgl_phys(struct gk20a *g, struct nvgpu_sgl *sgl) | |||
48 | return ipa; | 48 | return ipa; |
49 | } | 49 | } |
50 | 50 | ||
51 | int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem) | ||
52 | { | ||
53 | void *cpu_va; | ||
54 | pgprot_t prot = nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM) ? | ||
55 | PAGE_KERNEL : | ||
56 | pgprot_writecombine(PAGE_KERNEL); | ||
57 | |||
58 | if (mem->aperture != APERTURE_SYSMEM) | ||
59 | return 0; | ||
60 | |||
61 | /* | ||
62 | * WAR for bug 2040115: we already will always have a coherent vmap() | ||
63 | * for all sysmem buffers. The prot settings are left alone since | ||
64 | * eventually this should be deleted. | ||
65 | */ | ||
66 | if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM)) | ||
67 | return 0; | ||
68 | |||
69 | /* | ||
70 | * A CPU mapping is implicitly made for all SYSMEM DMA allocations that | ||
71 | * don't have NVGPU_DMA_NO_KERNEL_MAPPING. Thus we don't need to make | ||
72 | * another CPU mapping. | ||
73 | */ | ||
74 | if (!(mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING)) | ||
75 | return 0; | ||
76 | |||
77 | if (WARN_ON(mem->cpu_va)) { | ||
78 | nvgpu_warn(g, "nested"); | ||
79 | return -EBUSY; | ||
80 | } | ||
81 | |||
82 | cpu_va = vmap(mem->priv.pages, | ||
83 | PAGE_ALIGN(mem->size) >> PAGE_SHIFT, | ||
84 | 0, prot); | ||
85 | |||
86 | if (WARN_ON(!cpu_va)) | ||
87 | return -ENOMEM; | ||
88 | |||
89 | mem->cpu_va = cpu_va; | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem) | ||
94 | { | ||
95 | if (mem->aperture != APERTURE_SYSMEM) | ||
96 | return; | ||
97 | |||
98 | /* | ||
99 | * WAR for bug 2040115: skip this since the map will be taken care of | ||
100 | * during the free in the DMA API. | ||
101 | */ | ||
102 | if (nvgpu_is_enabled(g, NVGPU_USE_COHERENT_SYSMEM)) | ||
103 | return; | ||
104 | |||
105 | /* | ||
106 | * Similar to nvgpu_mem_begin() we don't need to unmap the CPU mapping | ||
107 | * already made by the DMA API. | ||
108 | */ | ||
109 | if (!(mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING)) | ||
110 | return; | ||
111 | |||
112 | vunmap(mem->cpu_va); | ||
113 | mem->cpu_va = NULL; | ||
114 | } | ||
115 | |||
116 | static void pramin_access_batch_rd_n(struct gk20a *g, u32 start, u32 words, u32 **arg) | 51 | static void pramin_access_batch_rd_n(struct gk20a *g, u32 start, u32 words, u32 **arg) |
117 | { | 52 | { |
118 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | 53 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); |