author		Alex Waterman <alexw@nvidia.com>	2017-03-15 18:54:16 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-04-06 21:14:42 -0400
commit		fa4ecf5730a75269e85cc41c2ad2ee61307e72a9 (patch)
tree		188ede95097acee261a21f563bf8f844d7874861 /drivers/gpu/nvgpu/gk20a/mm_gk20a.c
parent		2ff3a9f374e6e7fb6c468789cf8e0213f2297bdf (diff)
gpu: nvgpu: Split mem_desc from MM code
Split the mem_desc code out from the MM code. This is to help simplify the
MM code and make it easier to abstract the DMA allocation routines.

JIRA NVGPU-12

Change-Id: I2ccb643efe6bbed80d1360a580ff5593acb407bd
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1323324
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
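For orientation, a minimal sketch (not part of this patch) of the declarations the moved accessors would need in a shared mem_desc header. The destination header is an assumption here; the names and signatures are taken from the code removed below.

/* Hypothetical mem_desc header sketch; signatures copied from the
 * functions deleted from mm_gk20a.c in this change. */
#include <linux/types.h>

struct gk20a;
struct mem_desc;
struct scatterlist;
struct nvgpu_page_alloc;

void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr);
bool is_vidmem_page_alloc(u64 addr);
struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl);

int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem);
void gk20a_mem_end(struct gk20a *g, struct mem_desc *mem);
u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w);
u32 gk20a_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset);
void gk20a_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
		    u32 offset, void *dest, u32 size);
void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data);
void gk20a_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data);
void gk20a_mem_wr_n(struct gk20a *g, struct mem_desc *mem,
		    u32 offset, void *src, u32 size);
void gk20a_memset(struct gk20a *g, struct mem_desc *mem,
		  u32 offset, u32 c, u32 size);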
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	213
1 file changed, 3 insertions(+), 210 deletions(-)
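Most of the deletions below are the mem_desc accessor family (gk20a_mem_begin()/gk20a_mem_end() plus the rd/wr/memset helpers) and the aperture-mask helpers. Assuming the moved functions keep these signatures in their new home, a caller would look roughly like this sketch; example_touch_word() is purely illustrative.

/* Illustration only: map a sysmem mem_desc, update one word, unmap. */
static int example_touch_word(struct gk20a *g, struct mem_desc *mem)
{
	int err;
	u32 val;

	err = gk20a_mem_begin(g, mem);	/* vmap()s sysmem buffers; PRAMIN path needs no mapping */
	if (err)
		return err;

	val = gk20a_mem_rd32(g, mem, 0);	/* word index, not byte offset */
	gk20a_mem_wr32(g, mem, 0, val | 1);

	gk20a_mem_end(g, mem);			/* vunmap()s the CPU mapping */
	return 0;
}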
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 2fe76d80..e78eb941 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -56,21 +56,18 @@
 static void gk20a_vidmem_clear_mem_worker(struct work_struct *work);
 #endif
 
-static inline void
-set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr)
+void set_vidmem_page_alloc(struct scatterlist *sgl, u64 addr)
 {
 	/* set bit 0 to indicate vidmem allocation */
 	sg_dma_address(sgl) = (addr | 1ULL);
 }
 
-static inline bool
-is_vidmem_page_alloc(u64 addr)
+bool is_vidmem_page_alloc(u64 addr)
 {
 	return !!(addr & 1ULL);
 }
 
-struct nvgpu_page_alloc *
-get_vidmem_page_alloc(struct scatterlist *sgl)
+struct nvgpu_page_alloc *get_vidmem_page_alloc(struct scatterlist *sgl)
 {
 	u64 addr;
 
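The three helpers above lose their static inline qualifiers so they can be called from the new mem_desc code. A minimal sketch of how the bit-0 tag is meant to be used, assuming get_vidmem_page_alloc() strips the tag that set_vidmem_page_alloc() sets (its middle lines fall outside this hunk); sgl and alloc are illustrative locals.

/* Illustration only: bit 0 of the sg DMA address tags the entry as a
 * vidmem page-alloc handle rather than a real DMA address. */
set_vidmem_page_alloc(sgl, (u64)(uintptr_t)alloc);	/* stores alloc | 1ULL */

if (is_vidmem_page_alloc(sg_dma_address(sgl)))
	alloc = get_vidmem_page_alloc(sgl);	/* back to struct nvgpu_page_alloc * */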
@@ -84,187 +81,6 @@ get_vidmem_page_alloc(struct scatterlist *sgl)
 	return (struct nvgpu_page_alloc *)(uintptr_t)addr;
 }
 
-int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem)
-{
-	void *cpu_va;
-
-	if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
-		return 0;
-
-	if (WARN_ON(mem->cpu_va)) {
-		gk20a_warn(dev_from_gk20a(g), "nested %s", __func__);
-		return -EBUSY;
-	}
-
-	cpu_va = vmap(mem->pages,
-			PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
-			0, pgprot_writecombine(PAGE_KERNEL));
-
-	if (WARN_ON(!cpu_va))
-		return -ENOMEM;
-
-	mem->cpu_va = cpu_va;
-	return 0;
-}
-
-void gk20a_mem_end(struct gk20a *g, struct mem_desc *mem)
-{
-	if (mem->aperture != APERTURE_SYSMEM || g->mm.force_pramin)
-		return;
-
-	vunmap(mem->cpu_va);
-	mem->cpu_va = NULL;
-}
-
-u32 gk20a_mem_rd32(struct gk20a *g, struct mem_desc *mem, u32 w)
-{
-	u32 data = 0;
-
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
-		u32 *ptr = mem->cpu_va;
-
-		WARN_ON(!ptr);
-		data = ptr[w];
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
-#endif
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
-		u32 value;
-		u32 *p = &value;
-
-		nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
-				sizeof(u32), pramin_access_batch_rd_n, &p);
-
-		data = value;
-
-	} else {
-		WARN_ON("Accessing unallocated mem_desc");
-	}
-
-	return data;
-}
-
-u32 gk20a_mem_rd(struct gk20a *g, struct mem_desc *mem, u32 offset)
-{
-	WARN_ON(offset & 3);
-	return gk20a_mem_rd32(g, mem, offset / sizeof(u32));
-}
-
-void gk20a_mem_rd_n(struct gk20a *g, struct mem_desc *mem,
-		u32 offset, void *dest, u32 size)
-{
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
-		u8 *src = (u8 *)mem->cpu_va + offset;
-
-		WARN_ON(!mem->cpu_va);
-		memcpy(dest, src, size);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		if (size)
-			gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
-					src, *dest, size);
-#endif
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
-		u32 *dest_u32 = dest;
-
-		nvgpu_pramin_access_batched(g, mem, offset, size,
-				pramin_access_batch_rd_n, &dest_u32);
-	} else {
-		WARN_ON("Accessing unallocated mem_desc");
-	}
-}
-
-void gk20a_mem_wr32(struct gk20a *g, struct mem_desc *mem, u32 w, u32 data)
-{
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
-		u32 *ptr = mem->cpu_va;
-
-		WARN_ON(!ptr);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		gk20a_dbg(gpu_dbg_mem, " %p = 0x%x", ptr + w, data);
-#endif
-		ptr[w] = data;
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
-		u32 value = data;
-		u32 *p = &value;
-
-		nvgpu_pramin_access_batched(g, mem, w * sizeof(u32),
-				sizeof(u32), pramin_access_batch_wr_n, &p);
-		if (!mem->skip_wmb)
-			wmb();
-	} else {
-		WARN_ON("Accessing unallocated mem_desc");
-	}
-}
-
-void gk20a_mem_wr(struct gk20a *g, struct mem_desc *mem, u32 offset, u32 data)
-{
-	WARN_ON(offset & 3);
-	gk20a_mem_wr32(g, mem, offset / sizeof(u32), data);
-}
-
-void gk20a_mem_wr_n(struct gk20a *g, struct mem_desc *mem, u32 offset,
-		void *src, u32 size)
-{
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
-		u8 *dest = (u8 *)mem->cpu_va + offset;
-
-		WARN_ON(!mem->cpu_va);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		if (size)
-			gk20a_dbg(gpu_dbg_mem, " %p = 0x%x ... [%d bytes]",
-					dest, *src, size);
-#endif
-		memcpy(dest, src, size);
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
-		u32 *src_u32 = src;
-
-		nvgpu_pramin_access_batched(g, mem, offset, size,
-				pramin_access_batch_wr_n, &src_u32);
-		if (!mem->skip_wmb)
-			wmb();
-	} else {
-		WARN_ON("Accessing unallocated mem_desc");
-	}
-}
-
-void gk20a_memset(struct gk20a *g, struct mem_desc *mem, u32 offset,
-		u32 c, u32 size)
-{
-	WARN_ON(offset & 3);
-	WARN_ON(size & 3);
-	WARN_ON(c & ~0xff);
-
-	c &= 0xff;
-
-	if (mem->aperture == APERTURE_SYSMEM && !g->mm.force_pramin) {
-		u8 *dest = (u8 *)mem->cpu_va + offset;
-
-		WARN_ON(!mem->cpu_va);
-#ifdef CONFIG_TEGRA_SIMULATION_PLATFORM
-		if (size)
-			gk20a_dbg(gpu_dbg_mem, " %p = 0x%x [times %d]",
-					dest, c, size);
-#endif
-		memset(dest, c, size);
-	} else if (mem->aperture == APERTURE_VIDMEM || g->mm.force_pramin) {
-		u32 repeat_value = c | (c << 8) | (c << 16) | (c << 24);
-		u32 *p = &repeat_value;
-
-		nvgpu_pramin_access_batched(g, mem, offset, size,
-				pramin_access_batch_set, &p);
-		if (!mem->skip_wmb)
-			wmb();
-	} else {
-		WARN_ON("Accessing unallocated mem_desc");
-	}
-}
-
 /*
  * GPU mapping life cycle
  * ======================
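The final hunk also drops the aperture-mask helpers. Judging from the removed code, a caller picks between a sysmem and a vidmem encoding for some hardware field; the mask values in this sketch are made up for illustration.

/* Illustration only: 0x1 and 0x2 are placeholder encodings, not real
 * register field values. */
u32 aperture_field = gk20a_aperture_mask(g, mem,
					 0x1 /* sysmem encoding */,
					 0x2 /* vidmem encoding */);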
@@ -3121,29 +2937,6 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
 }
 #endif
 
-u32 __gk20a_aperture_mask(struct gk20a *g, enum gk20a_aperture aperture,
-		u32 sysmem_mask, u32 vidmem_mask)
-{
-	switch (aperture) {
-	case APERTURE_SYSMEM:
-		/* sysmem for dgpus; some igpus consider system memory vidmem */
-		return g->mm.vidmem_is_vidmem ? sysmem_mask : vidmem_mask;
-	case APERTURE_VIDMEM:
-		/* for dgpus only */
-		return vidmem_mask;
-	case APERTURE_INVALID:
-		WARN_ON("Bad aperture");
-	}
-	return 0;
-}
-
-u32 gk20a_aperture_mask(struct gk20a *g, struct mem_desc *mem,
-		u32 sysmem_mask, u32 vidmem_mask)
-{
-	return __gk20a_aperture_mask(g, mem->aperture,
-			sysmem_mask, vidmem_mask);
-}
-
 int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
 		struct mem_desc *mem)
 {