summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--drivers/gpu/nvgpu/common/linux/dma.c27
1 file changed, 20 insertions, 7 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index cd6ad1b2..13c1c347 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -112,6 +112,13 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
112 112
113 gk20a_dbg_fn(""); 113 gk20a_dbg_fn("");
114 114
115 /*
116 * Save the old size but for actual allocation purposes the size is
117 * going to be page aligned.
118 */
119 mem->size = size;
120 size = PAGE_ALIGN(size);
121
115 if (flags) { 122 if (flags) {
116 DEFINE_DMA_ATTRS(dma_attrs); 123 DEFINE_DMA_ATTRS(dma_attrs);
117 124
@@ -148,7 +155,7 @@ int nvgpu_dma_alloc_flags_sys(struct gk20a *g, unsigned long flags,
148 if (err) 155 if (err)
149 goto fail_free; 156 goto fail_free;
150 157
151 mem->size = size; 158 mem->aligned_size = size;
152 mem->aperture = APERTURE_SYSMEM; 159 mem->aperture = APERTURE_SYSMEM;
153 mem->priv.flags = flags; 160 mem->priv.flags = flags;
154 161
@@ -188,6 +195,9 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
188 195
189 gk20a_dbg_fn(""); 196 gk20a_dbg_fn("");
190 197
198 mem->size = size;
199 size = PAGE_ALIGN(size);
200
191 if (!nvgpu_alloc_initialized(&g->mm.vidmem.allocator)) 201 if (!nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
192 return -ENOSYS; 202 return -ENOSYS;
193 203
@@ -228,7 +238,7 @@ int nvgpu_dma_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
228 set_vidmem_page_alloc(mem->priv.sgt->sgl, addr); 238 set_vidmem_page_alloc(mem->priv.sgt->sgl, addr);
229 sg_set_page(mem->priv.sgt->sgl, NULL, size, 0); 239 sg_set_page(mem->priv.sgt->sgl, NULL, size, 0);
230 240
231 mem->size = size; 241 mem->aligned_size = size;
232 mem->aperture = APERTURE_VIDMEM; 242 mem->aperture = APERTURE_VIDMEM;
233 mem->allocator = vidmem_alloc; 243 mem->allocator = vidmem_alloc;
234 mem->priv.flags = flags; 244 mem->priv.flags = flags;
@@ -352,16 +362,16 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
352 nvgpu_dma_flags_to_attrs(&dma_attrs, mem->priv.flags); 362 nvgpu_dma_flags_to_attrs(&dma_attrs, mem->priv.flags);
353 363
354 if (mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) { 364 if (mem->priv.flags & NVGPU_DMA_NO_KERNEL_MAPPING) {
355 dma_free_attrs(d, mem->size, mem->priv.pages, 365 dma_free_attrs(d, mem->aligned_size, mem->priv.pages,
356 sg_dma_address(mem->priv.sgt->sgl), 366 sg_dma_address(mem->priv.sgt->sgl),
357 __DMA_ATTR(dma_attrs)); 367 __DMA_ATTR(dma_attrs));
358 } else { 368 } else {
359 dma_free_attrs(d, mem->size, mem->cpu_va, 369 dma_free_attrs(d, mem->aligned_size, mem->cpu_va,
360 sg_dma_address(mem->priv.sgt->sgl), 370 sg_dma_address(mem->priv.sgt->sgl),
361 __DMA_ATTR(dma_attrs)); 371 __DMA_ATTR(dma_attrs));
362 } 372 }
363 } else { 373 } else {
364 dma_free_coherent(d, mem->size, mem->cpu_va, 374 dma_free_coherent(d, mem->aligned_size, mem->cpu_va,
365 sg_dma_address(mem->priv.sgt->sgl)); 375 sg_dma_address(mem->priv.sgt->sgl));
366 } 376 }
367 mem->cpu_va = NULL; 377 mem->cpu_va = NULL;
@@ -379,6 +389,7 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
379 nvgpu_free_sgtable(g, &mem->priv.sgt); 389 nvgpu_free_sgtable(g, &mem->priv.sgt);
380 390
381 mem->size = 0; 391 mem->size = 0;
392 mem->aligned_size = 0;
382 mem->aperture = APERTURE_INVALID; 393 mem->aperture = APERTURE_INVALID;
383} 394}
384 395
@@ -395,7 +406,8 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
395 was_empty = nvgpu_list_empty(&g->mm.vidmem.clear_list_head); 406 was_empty = nvgpu_list_empty(&g->mm.vidmem.clear_list_head);
396 nvgpu_list_add_tail(&mem->clear_list_entry, 407 nvgpu_list_add_tail(&mem->clear_list_entry,
397 &g->mm.vidmem.clear_list_head); 408 &g->mm.vidmem.clear_list_head);
398 atomic64_add(mem->size, &g->mm.vidmem.bytes_pending.atomic_var); 409 atomic64_add(mem->aligned_size,
410 &g->mm.vidmem.bytes_pending.atomic_var);
399 nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex); 411 nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
400 412
401 if (was_empty) { 413 if (was_empty) {
@@ -403,12 +415,13 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
403 schedule_work(&g->mm.vidmem.clear_mem_worker); 415 schedule_work(&g->mm.vidmem.clear_mem_worker);
404 } 416 }
405 } else { 417 } else {
406 nvgpu_memset(g, mem, 0, 0, mem->size); 418 nvgpu_memset(g, mem, 0, 0, mem->aligned_size);
407 nvgpu_free(mem->allocator, 419 nvgpu_free(mem->allocator,
408 (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl)); 420 (u64)get_vidmem_page_alloc(mem->priv.sgt->sgl));
409 nvgpu_free_sgtable(g, &mem->priv.sgt); 421 nvgpu_free_sgtable(g, &mem->priv.sgt);
410 422
411 mem->size = 0; 423 mem->size = 0;
424 mem->aligned_size = 0;
412 mem->aperture = APERTURE_INVALID; 425 mem->aperture = APERTURE_INVALID;
413 } 426 }
414#endif 427#endif