author      Alex Waterman <alexw@nvidia.com>        2017-04-06 18:30:01 -0400
committer   mobile promotions <svcmobile_promotions@nvidia.com>        2017-04-20 19:14:32 -0400
commit      e32f62fadfcde413bcd9b5af61ad884e27ba2bf1 (patch)
tree        eff606a0826841eae6ade5906acd9da589d1179a /drivers/gpu/nvgpu/common
parent      52bd58b560d0b3b49c03ef5c2637b67adeac8193 (diff)
gpu: nvgpu: Move Linux nvgpu_mem fields
Hide the Linux-specific nvgpu_mem fields so that, in subsequent patches, core
code can use mem_desc instead of struct sg_table directly. Routines for
accessing the system-specific fields will be added as needed.

This is the first step in a fairly major overhaul of the GMMU mapping
routines. There are numerous issues with the current design (or lack
thereof): massively coupled code, system dependencies, disorganization, etc.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I2e7d3ae3a07468cfc17c1c642d28ed1b0952474d
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1464076
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
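The core of the change is easiest to read as a data-structure move: the Linux-only
members (the page array, the scatter-gather table, and the DMA allocation flags)
now sit behind a priv member of struct nvgpu_mem, which the diff below accesses as
mem->priv.pages, mem->priv.sgt, and mem->priv.flags. The following is a minimal
sketch of that layout; the struct name nvgpu_mem_priv, the exact set of fields,
and their ordering are assumptions for illustration, not the literal definition
added by this patch.

/* Sketch only: names other than priv, pages, sgt, and flags are assumed. */
struct nvgpu_mem_priv {
	struct page		**pages;	/* set for NVGPU_DMA_NO_KERNEL_MAPPING allocs */
	struct sg_table		*sgt;		/* Linux scatter-gather description of the buffer */
	unsigned long		flags;		/* NVGPU_DMA_* flags used at allocation time */
};

struct nvgpu_mem {
	enum nvgpu_aperture	aperture;	/* APERTURE_SYSMEM, APERTURE_VIDMEM, ... */
	size_t			size;
	void			*cpu_va;
	u64			gpu_va;
	/* ... other OS-agnostic fields ... */
	struct nvgpu_mem_priv	priv;		/* Linux-specific; core code should not touch */
};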
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/dma.c         60
-rw-r--r--  drivers/gpu/nvgpu/common/linux/nvgpu_mem.c    2
-rw-r--r--  drivers/gpu/nvgpu/common/pramin.c             2
-rw-r--r--  drivers/gpu/nvgpu/common/semaphore.c          4
4 files changed, 35 insertions, 33 deletions
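The commit message notes that routines for accessing the system-specific fields
will be added as needed; none are added in this patch. As a purely hypothetical
illustration of where this is headed, such a helper could look like the sketch
below (the function name is invented here, not taken from the series):

/* Hypothetical accessor, not part of this patch. */
static inline struct sg_table *nvgpu_mem_linux_sgt(struct nvgpu_mem *mem)
{
	return mem->priv.sgt;
}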
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index 2a75ad13..832d0f47 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -107,10 +107,10 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	nvgpu_dma_flags_to_attrs(&dma_attrs, flags);
 
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
-		mem->pages = dma_alloc_attrs(d,
+		mem->priv.pages = dma_alloc_attrs(d,
 				size, &iova, GFP_KERNEL,
 				__DMA_ATTR(dma_attrs));
-		if (!mem->pages)
+		if (!mem->priv.pages)
 			return -ENOMEM;
 	} else {
 		mem->cpu_va = dma_alloc_attrs(d,
@@ -126,10 +126,12 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 	}
 
 	if (flags & NVGPU_DMA_NO_KERNEL_MAPPING)
-		err = gk20a_get_sgtable_from_pages(d, &mem->sgt, mem->pages,
+		err = gk20a_get_sgtable_from_pages(d, &mem->priv.sgt,
+						   mem->priv.pages,
 				iova, size);
 	else {
-		err = gk20a_get_sgtable(d, &mem->sgt, mem->cpu_va, iova, size);
+		err = gk20a_get_sgtable(d, &mem->priv.sgt, mem->cpu_va,
+					iova, size);
 		memset(mem->cpu_va, 0, size);
 	}
 	if (err)
@@ -137,7 +139,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 
 	mem->size = size;
 	mem->aperture = APERTURE_SYSMEM;
-	mem->flags = flags;
+	mem->priv.flags = flags;
 
 	gk20a_dbg_fn("done");
 
@@ -146,7 +148,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
 fail_free:
 	dma_free_coherent(d, size, mem->cpu_va, iova);
 	mem->cpu_va = NULL;
-	mem->sgt = NULL;
+	mem->priv.sgt = NULL;
 	return err;
 }
 
@@ -204,23 +206,23 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	else
 		mem->fixed = false;
 
-	mem->sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
-	if (!mem->sgt) {
+	mem->priv.sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
+	if (!mem->priv.sgt) {
 		err = -ENOMEM;
 		goto fail_physfree;
 	}
 
-	err = sg_alloc_table(mem->sgt, 1, GFP_KERNEL);
+	err = sg_alloc_table(mem->priv.sgt, 1, GFP_KERNEL);
 	if (err)
 		goto fail_kfree;
 
-	set_vidmem_page_alloc(mem->sgt->sgl, addr);
-	sg_set_page(mem->sgt->sgl, NULL, size, 0);
+	set_vidmem_page_alloc(mem->priv.sgt->sgl, addr);
+	sg_set_page(mem->priv.sgt->sgl, NULL, size, 0);
 
 	mem->size = size;
 	mem->aperture = APERTURE_VIDMEM;
 	mem->allocator = vidmem_alloc;
-	mem->flags = flags;
+	mem->priv.flags = flags;
 
 	nvgpu_init_list_node(&mem->clear_list_entry);
 
@@ -229,7 +231,7 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	return 0;
 
 fail_kfree:
-	nvgpu_kfree(g, mem->sgt);
+	nvgpu_kfree(g, mem->priv.sgt);
 fail_physfree:
 	nvgpu_free(&g->mm.vidmem.allocator, addr);
 	return err;
@@ -283,7 +285,7 @@ int nvgpu_dma_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
 	if (err)
 		return err;
 
-	mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
+	mem->gpu_va = gk20a_gmmu_map(vm, &mem->priv.sgt, size, 0,
 				     gk20a_mem_flag_none, false,
 				     mem->aperture);
 	if (!mem->gpu_va) {
@@ -313,7 +315,7 @@ int nvgpu_dma_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
 	if (err)
 		return err;
 
-	mem->gpu_va = gk20a_gmmu_map(vm, &mem->sgt, size, 0,
+	mem->gpu_va = gk20a_gmmu_map(vm, &mem->priv.sgt, size, 0,
 				     gk20a_mem_flag_none, false,
 				     mem->aperture);
 	if (!mem->gpu_va) {
@@ -332,31 +334,31 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
 
-	if (mem->cpu_va || mem->pages) {
-		if (mem->flags) {
+	if (mem->cpu_va || mem->priv.pages) {
+		if (mem->priv.flags) {
 			DEFINE_DMA_ATTRS(dma_attrs);
 
-			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->flags);
+			nvgpu_dma_flags_to_attrs(&dma_attrs, mem->priv.flags);
 
-			if (mem->flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
-				dma_free_attrs(d, mem->size, mem->pages,
-					sg_dma_address(mem->sgt->sgl),
+			if (mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
+				dma_free_attrs(d, mem->size, mem->priv.pages,
+					sg_dma_address(mem->priv.sgt->sgl),
 					__DMA_ATTR(dma_attrs));
 			} else {
 				dma_free_attrs(d, mem->size, mem->cpu_va,
-					sg_dma_address(mem->sgt->sgl),
+					sg_dma_address(mem->priv.sgt->sgl),
 					__DMA_ATTR(dma_attrs));
 			}
 		} else {
 			dma_free_coherent(d, mem->size, mem->cpu_va,
-				sg_dma_address(mem->sgt->sgl));
+				sg_dma_address(mem->priv.sgt->sgl));
 		}
 		mem->cpu_va = NULL;
-		mem->pages = NULL;
+		mem->priv.pages = NULL;
 	}
 
-	if (mem->sgt)
-		gk20a_free_sgtable(g, &mem->sgt);
+	if (mem->priv.sgt)
+		gk20a_free_sgtable(g, &mem->priv.sgt);
 
 	mem->size = 0;
 	mem->aperture = APERTURE_INVALID;
@@ -368,7 +370,7 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	bool was_empty;
 
 	/* Sanity check - only this supported when allocating. */
-	WARN_ON(mem->flags != NVGPU_DMA_NO_KERNEL_MAPPING);
+	WARN_ON(mem->priv.flags != NVGPU_DMA_NO_KERNEL_MAPPING);
 
 	if (mem->user_mem) {
 		nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
@@ -385,8 +387,8 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	} else {
 		nvgpu_memset(g, mem, 0, 0, mem->size);
 		nvgpu_free(mem->allocator,
-			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
-		gk20a_free_sgtable(g, &mem->sgt);
+			   (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
+		gk20a_free_sgtable(g, &mem->priv.sgt);
 
 		mem->size = 0;
 		mem->aperture = APERTURE_INVALID;
diff --git a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
index eb214aad..bb19dd61 100644
--- a/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
+++ b/drivers/gpu/nvgpu/common/linux/nvgpu_mem.c
@@ -57,7 +57,7 @@ int nvgpu_mem_begin(struct gk20a *g, struct nvgpu_mem *mem)
 		return -EBUSY;
 	}
 
-	cpu_va = vmap(mem->pages,
+	cpu_va = vmap(mem->priv.pages,
 			PAGE_ALIGN(mem->size) >> PAGE_SHIFT,
 			0, pgprot_writecombine(PAGE_KERNEL));
 
diff --git a/drivers/gpu/nvgpu/common/pramin.c b/drivers/gpu/nvgpu/common/pramin.c
index 378711fc..688e5ce8 100644
--- a/drivers/gpu/nvgpu/common/pramin.c
+++ b/drivers/gpu/nvgpu/common/pramin.c
@@ -87,7 +87,7 @@ void nvgpu_pramin_access_batched(struct gk20a *g, struct nvgpu_mem *mem,
 	struct page_alloc_chunk *chunk = NULL;
 	u32 byteoff, start_reg, until_end, n;
 
-	alloc = get_vidmem_page_alloc(mem->sgt->sgl);
+	alloc = get_vidmem_page_alloc(mem->priv.sgt->sgl);
 	nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
 			page_alloc_chunk, list_entry) {
 		if (offset >= chunk->length)
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c
index 9e437410..bf7b6348 100644
--- a/drivers/gpu/nvgpu/common/semaphore.c
+++ b/drivers/gpu/nvgpu/common/semaphore.c
@@ -60,7 +60,7 @@ static int __nvgpu_semaphore_sea_grow(struct nvgpu_semaphore_sea *sea)
 	if (ret)
 		goto out;
 
-	sea->ro_sg_table = sea->sea_mem.sgt;
+	sea->ro_sg_table = sea->sea_mem.priv.sgt;
 	sea->size = SEMAPHORE_POOL_COUNT;
 	sea->map_size = SEMAPHORE_POOL_COUNT * PAGE_SIZE;
 
@@ -154,7 +154,7 @@ struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
 
 	page_idx = (unsigned long)ret;
 
-	p->page = sea->sea_mem.pages[page_idx];
+	p->page = sea->sea_mem.priv.pages[page_idx];
 	p->ro_sg_table = sea->ro_sg_table;
 	p->page_idx = page_idx;
 	p->sema_sea = sea;