Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c | 25 ++++++++++++++++++++-------
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 86d8bec9..4a4429dc 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -21,8 +21,11 @@
 #include <nvgpu/lock.h>
 #include <nvgpu/rbtree.h>
 #include <nvgpu/vm_area.h>
+#include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/page_allocator.h>
 
+#include <nvgpu/linux/nvgpu_mem.h>
+
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
 #include "gk20a/kind_gk20a.h"
@@ -66,17 +69,19 @@ static u64 nvgpu_get_buffer_alignment(struct gk20a *g, struct scatterlist *sgl,
 
 	if (aperture == APERTURE_VIDMEM) {
 		struct nvgpu_page_alloc *alloc = get_vidmem_page_alloc(sgl);
-		struct page_alloc_chunk *chunk = NULL;
+		struct nvgpu_mem_sgl *sgl_vid = alloc->sgl;
 
-		nvgpu_list_for_each_entry(chunk, &alloc->alloc_chunks,
-				page_alloc_chunk, list_entry) {
-			chunk_align = 1ULL << __ffs(chunk->base |
-					chunk->length);
+		while (sgl_vid) {
+			chunk_align = 1ULL <<
+				__ffs(nvgpu_mem_sgl_phys(sgl_vid) |
+				      nvgpu_mem_sgl_length(sgl_vid));
 
 			if (align)
 				align = min(align, chunk_align);
 			else
 				align = chunk_align;
+
+			sgl_vid = nvgpu_mem_sgl_next(sgl_vid);
 		}
 
 		return align;
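
The loop above computes, for each chunk in the scatter list, the largest power of two that divides both its physical base and its length, then keeps the minimum across chunks: a buffer is only as aligned as its worst chunk. Below is a minimal standalone sketch of the same computation, assuming a hypothetical struct chunk in place of nvgpu_mem_sgl and using GCC's __builtin_ctzll() in place of the kernel's __ffs():

#include <stdint.h>

/* hypothetical stand-in for the nvgpu_mem_sgl chain walked above */
struct chunk {
	uint64_t phys;		/* physical base of this chunk */
	uint64_t length;	/* size of this chunk in bytes */
	struct chunk *next;
};

static uint64_t buffer_alignment(const struct chunk *c)
{
	uint64_t align = 0;

	for (; c; c = c->next) {
		/*
		 * The lowest set bit of (phys | length) is the lowest
		 * bit set in either field, so shifting 1 by its index
		 * gives the largest power of two dividing both, i.e.
		 * the chunk's best guaranteed alignment. (phys | length
		 * is assumed nonzero, as in the driver code.)
		 */
		uint64_t chunk_align =
			1ULL << __builtin_ctzll(c->phys | c->length);

		if (align)
			align = align < chunk_align ? align : chunk_align;
		else
			align = chunk_align;
	}

	return align;
}

For example, a chunk at physical 0x3000 with length 0x1000 contributes 0x1000 (4 KiB), so a buffer containing it cannot be assumed to have better than 4 KiB alignment.
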
@@ -237,6 +242,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	struct nvgpu_vm_area *vm_area = NULL;
 	u32 ctag_offset;
 	enum nvgpu_aperture aperture;
+	struct nvgpu_mem_sgl *nvgpu_sgl;
 
 	/*
 	 * The kind used as part of the key for map caching. HW may
@@ -393,9 +399,12 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	ctag_offset += buffer_offset >>
 		       ilog2(g->ops.fb.compression_page_size(g));
 
+	nvgpu_sgl = nvgpu_mem_sgl_create(g, bfr.sgt);
+
 	/* update gmmu ptes */
-	map_offset = g->ops.mm.gmmu_map(vm, map_offset,
-					bfr.sgt,
+	map_offset = g->ops.mm.gmmu_map(vm,
+					map_offset,
+					nvgpu_sgl,
 					buffer_offset, /* sg offset */
 					mapping_size,
 					bfr.pgsz_idx,
@@ -410,6 +419,8 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	if (!map_offset)
 		goto clean_up;
 
+	nvgpu_mem_sgl_free(g, nvgpu_sgl);
+
 	mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer));
 	if (!mapped_buffer) {
 		nvgpu_warn(g, "oom allocating tracking buffer");
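
Taken together with the nvgpu_sgl declaration added earlier, the last two hunks establish a short create → map → free lifecycle: nvgpu_mem_sgl_create() translates the Linux sg_table into the OS-agnostic SGL, g->ops.mm.gmmu_map() consumes only that SGL, and the wrapper is released once the PTEs are written. Below is a self-contained toy model of that ownership pattern; every type and function in it is a local stand-in for illustration, not the nvgpu API:

#include <stdint.h>
#include <stdlib.h>

/* local stand-in for struct nvgpu_mem_sgl */
struct toy_sgl {
	uint64_t phys;
	uint64_t length;
	struct toy_sgl *next;
};

/* stand-in for nvgpu_mem_sgl_create(): wrap one (phys, length) range */
static struct toy_sgl *toy_sgl_create(uint64_t phys, uint64_t length)
{
	struct toy_sgl *sgl = calloc(1, sizeof(*sgl));

	if (sgl) {
		sgl->phys = phys;
		sgl->length = length;
	}
	return sgl;
}

/* stand-in for nvgpu_mem_sgl_free(): release the whole chain */
static void toy_sgl_free(struct toy_sgl *sgl)
{
	while (sgl) {
		struct toy_sgl *next = sgl->next;

		free(sgl);
		sgl = next;
	}
}

/*
 * stand-in for gmmu_map(): walks the chain; returning 0 signals
 * failure, matching the !map_offset check in nvgpu_vm_map() above
 */
static uint64_t toy_gmmu_map(const struct toy_sgl *sgl, uint64_t va)
{
	uint64_t mapped = 0;

	for (; sgl; sgl = sgl->next)
		mapped += sgl->length;	/* a real mapper writes PTEs here */

	return mapped ? va : 0;
}

int main(void)
{
	struct toy_sgl *sgl = toy_sgl_create(0x3000, 0x1000);
	uint64_t va;

	/* same ordering as the diff: create, hand off to the mapper, free */
	va = toy_gmmu_map(sgl, 0x100000000ULL);
	toy_sgl_free(sgl);

	return va ? 0 : 1;
}

Confining the wrapper's lifetime to the map call keeps the sg_table as the authoritative description of the buffer; presumably this is also why the first hunk splits the headers, with common code seeing only <nvgpu/nvgpu_mem.h> while the sg_table translation stays behind the Linux-only <nvgpu/linux/nvgpu_mem.h>.
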