path: root/drivers/gpu/nvgpu/common
author     Alex Waterman <alexw@nvidia.com>                    2017-03-21 15:55:35 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com> 2017-04-06 21:14:53 -0400
commit     c9665079d7b12f22a847c62587724b4ee120ca6e (patch)
tree       7882bd08193db4c34b3b8ad7df7013339da2fba1 /drivers/gpu/nvgpu/common
parent     b69020bff5dfa69cad926c9374cdbe9a62509ffd (diff)
gpu: nvgpu: rename mem_desc to nvgpu_mem
Renaming was done with the following command:

    $ find -type f | \
        xargs sed -i 's/struct mem_desc/struct nvgpu_mem/g'

Also rename mem_desc.[ch] to nvgpu_mem.[ch].

JIRA NVGPU-12

Change-Id: I69395758c22a56aa01e3dffbcded70a729bf559a
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1325547
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/nvgpu_mem.c (renamed from drivers/gpu/nvgpu/common/linux/mem_desc.c)  32
-rw-r--r--  drivers/gpu/nvgpu/common/pramin.c                                                                     2
2 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/mem_desc.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index 02c3d1a9..b282d050 100644
--- a/drivers/gpu/nvgpu/common/linux/mem_desc.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -14,7 +14,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <nvgpu/mem_desc.h>
+#include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/page_allocator.h>
 
 #include "gk20a/gk20a.h"
@@ -36,14 +36,14 @@ u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture,
 	return 0;
 }
 
-u32 nvgpu_aperture_mask(struct gk20a *g, struct mem_desc *mem,
+u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 sysmem_mask, u32 vidmem_mask)
 {
 	return __nvgpu_aperture_mask(g, mem->aperture,
 			sysmem_mask, vidmem_mask);
 }
 
-int nvgpu_mem_begin(struct gk20a *g, struct mem_desc *mem)
+int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	void *cpu_va;
 
@@ -66,7 +66,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct mem_desc *mem)
 	return 0;
 }
 
-void nvgpu_mem_end(struct gk20a *g, struct mem_desc *mem)
+void nvgpu_mem_end(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
 		return;
@@ -75,7 +75,7 @@ void nvgpu_mem_end(struct gk20a *g, struct mem_desc *mem)
 	mem->cpu_va = NULL;
 }
 
-u32 nvgpu_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
+u32 nvgpu_mem_rd32(struct gk20a *g, struct nvgpu_mem *mem, u32 w)
 {
 	u32 data = 0;
 
@@ -97,19 +97,19 @@ u32 nvgpu_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
 		data = value;
 
 	} else {
-		WARN_ON("Accessing unallocated mem_desc");
+		WARN_ON("Accessing unallocated nvgpu_mem");
 	}
 
 	return data;
 }
 
-u32 nvgpu_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset)
+u32 nvgpu_mem_rd(struct gk20a *g, struct nvgpu_mem *mem, u32 offset)
 {
 	WARN_ON(offset & 3);
 	return nvgpu_mem_rd32(g, mem, offset / sizeof(u32));
 }
 
-void nvgpu_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
+void nvgpu_mem_rd_n(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 offset, void *dest, u32 size)
 {
 	WARN_ON(offset & 3);
@@ -131,11 +131,11 @@ void nvgpu_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
 		nvgpu_pramin_access_batched(g, mem, offset, size,
 				pramin_access_batch_rd_n, &dest_u32);
 	} else {
-		WARN_ON("Accessing unallocated mem_desc");
+		WARN_ON("Accessing unallocated nvgpu_mem");
 	}
 }
 
-void nvgpu_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data)
+void nvgpu_mem_wr32(struct gk20a *g, struct nvgpu_mem *mem, u32 w, u32 data)
 {
 	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
 		u32 *ptr = mem->cpu_va;
@@ -154,17 +154,17 @@ void nvgpu_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data)
 		if (!mem->skip_wmb)
 			wmb();
 	} else {
-		WARN_ON("Accessing unallocated mem_desc");
+		WARN_ON("Accessing unallocated nvgpu_mem");
 	}
 }
 
-void nvgpu_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data)
+void nvgpu_mem_wr(struct gk20a *g, struct nvgpu_mem *mem, u32 offset, u32 data)
 {
 	WARN_ON(offset & 3);
 	nvgpu_mem_wr32(g, mem, offset / sizeof(u32), data);
 }
 
-void nvgpu_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
+void nvgpu_mem_wr_n(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		void *src, u32 size)
 {
 	WARN_ON(offset & 3);
@@ -188,11 +188,11 @@ void nvgpu_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
 		if (!mem->skip_wmb)
 			wmb();
 	} else {
-		WARN_ON("Accessing unallocated mem_desc");
+		WARN_ON("Accessing unallocated nvgpu_mem");
 	}
 }
 
-void nvgpu_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
+void nvgpu_memset(struct gk20a *g, struct nvgpu_mem *mem, u32 offset,
 		u32 c, u32 size)
 {
 	WARN_ON(offset & 3);
@@ -220,6 +220,6 @@ void nvgpu_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
 		if (!mem->skip_wmb)
 			wmb();
 	} else {
-		WARN_ON("Accessing unallocated mem_desc");
+		WARN_ON("Accessing unallocated nvgpu_mem");
 	}
 }
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index aa732368..378711fc 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -80,7 +80,7 @@ void pramin_access_batch_set(struct gk20a *g, u32 start, u32 words, u32 **arg)
  * This same loop is used for read/write/memset. Offset and size in bytes.
  * One call to "loop" is done per range, with "arg" supplied.
  */
-void nvgpu_pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
+void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 	u32 offset, u32 size, pramin_access_batch_fn loop, u32 **arg)
 {
 	struct nvgpu_page_alloc *alloc = NULL;
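
For orientation, the functions touched in nvgpu_mem.c above form the CPU-side accessor API around the renamed struct nvgpu_mem. The sketch below is illustrative only and not part of this change: it assumes nothing beyond the signatures visible in the diff (nvgpu_mem_begin/end, nvgpu_mem_rd32, nvgpu_mem_wr32); the helper name example_patch_word and its inst_block argument are hypothetical.

#include <nvgpu/nvgpu_mem.h>

#include "gk20a/gk20a.h"

/* Hypothetical caller: set bit 0 of word 0 in an nvgpu_mem buffer. */
static int example_patch_word(struct gk20a *g, struct nvgpu_mem *inst_block)
{
	u32 val;
	int err;

	/* Map the buffer (or set up PRAMIN access) before any CPU reads/writes. */
	err = nvgpu_mem_begin(g, inst_block);
	if (err)
		return err;

	val = nvgpu_mem_rd32(g, inst_block, 0);		/* read 32-bit word 0 */
	nvgpu_mem_wr32(g, inst_block, 0, val | 0x1);	/* write it back with bit 0 set */

	nvgpu_mem_end(g, inst_block);			/* release the mapping */
	return 0;
}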