path: root/drivers/gpu/nvgpu/common/linux/vm.c
author     Alex Waterman <alexw@nvidia.com>                     2017-04-25 18:56:12 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-05-19 18:34:12 -0400
commit     29cc82844e03b6f9f0e6801169b6fa0e72d56628 (patch)
tree       f616b6c651ce80765ee344aa33ca204c555e67f2              /drivers/gpu/nvgpu/common/linux/vm.c
parent     014ace5a85f274de7debb4c6168d69c803445e19 (diff)
gpu: nvgpu: Split vm_area management into vm code
The vm_reserved_va_node struct is essentially a special VM area that can be used for sparse mappings and fixed mappings. The name of this struct is somewhat confusing, since "node" is typically used for list items; although this struct sits on a list, it is much more than a list entry. The struct has therefore been renamed to nvgpu_vm_area to capture its actual use more accurately.

This change also moves all of the vm_area management code into a new file devoted solely to vm_area management.

Also add a brief overview of the VM architecture; this should help others follow the hierarchy of ownership and lifetimes in the rather complex MM code.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: If85e1cf868031d0dc265e7bed50b58a2aed2602e
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477744
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
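For orientation, a minimal sketch of what the renamed struct might look like. Only buffer_list_head is confirmed by the diff below; every other field name here is an assumption, and the real definition lives in the new <nvgpu/vm_area.h> header introduced by this change.

/*
 * Hypothetical sketch only -- not the actual nvgpu definition.
 * buffer_list_head is taken from the usage in vm.c below; the remaining
 * fields are assumptions based on the commit description (a VM area used
 * for sparse and fixed mappings).
 */
struct nvgpu_vm_area {
	u64 addr;		/* base GPU VA of the area (assumed) */
	u64 size;		/* size of the area in bytes (assumed) */
	u32 pgsz_idx;		/* page size index for the area (assumed) */
	bool sparse;		/* true for sparse mappings (assumed) */

	/*
	 * Buffers mapped into this area; vm.c below adds mapped_buffer
	 * entries here via nvgpu_list_add_tail().
	 */
	struct nvgpu_list_node buffer_list_head;

	/* Entry in the owning vm_gk20a's list of areas (assumed). */
	struct nvgpu_list_node vm_area_list;
};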
Diffstat (limited to 'drivers/gpu/nvgpu/common/linux/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/vm.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 8b9d6f96..5470d9ee 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -20,6 +20,7 @@
 #include <nvgpu/log.h>
 #include <nvgpu/lock.h>
 #include <nvgpu/rbtree.h>
+#include <nvgpu/vm_area.h>
 #include <nvgpu/page_allocator.h>
 
 #include "gk20a/gk20a.h"
@@ -196,7 +197,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	struct scatterlist *sgl;
 	u64 ctag_map_win_size = 0;
 	u32 ctag_map_win_ctagline = 0;
-	struct vm_reserved_va_node *va_node = NULL;
+	struct nvgpu_vm_area *vm_area = NULL;
 	u32 ctag_offset;
 	enum nvgpu_aperture aperture;
 
@@ -256,9 +257,8 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 
 	/* Check if we should use a fixed offset for mapping this buffer */
 	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
-		err = validate_fixed_buffer(vm, &bfr,
-					    offset_align, mapping_size,
-					    &va_node);
+		err = nvgpu_vm_area_validate_buffer(vm, offset_align, mapping_size,
+						    bfr.pgsz_idx, &vm_area);
 		if (err)
 			goto clean_up;
 
@@ -376,10 +376,10 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	if (user_mapped)
 		vm->num_user_mapped_buffers++;
 
-	if (va_node) {
+	if (vm_area) {
 		nvgpu_list_add_tail(&mapped_buffer->buffer_list,
-				    &va_node->buffer_list_head);
-		mapped_buffer->va_node = va_node;
+				    &vm_area->buffer_list_head);
+		mapped_buffer->vm_area = vm_area;
 	}
 
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
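For reference, the prototype of the new validation helper as it can be inferred from the call site in the fixed-offset hunk above; parameter names and exact types are guesses, and the actual declaration is expected to live in <nvgpu/vm_area.h>.

/*
 * Inferred from the call in nvgpu_vm_map():
 *   err = nvgpu_vm_area_validate_buffer(vm, offset_align, mapping_size,
 *                                       bfr.pgsz_idx, &vm_area);
 * On success, *pvm_area is assumed to point at the nvgpu_vm_area that
 * covers the requested fixed-offset mapping. Names and types here are
 * assumptions, not the verified declaration.
 */
int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
				  u64 map_offset, u64 map_size, int pgsz_idx,
				  struct nvgpu_vm_area **pvm_area);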