From 29cc82844e03b6f9f0e6801169b6fa0e72d56628 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Tue, 25 Apr 2017 15:56:12 -0700
Subject: gpu: nvgpu: Split vm_area management into vm code

The vm_reserved_va_node struct is essentially a special VM area that
can be used for sparse mappings and fixed mappings. The name of this
struct is somewhat confusing, since "node" is typically used for list
items; although this struct is part of a list, it doesn't really make
sense to call it a list item since it's much more than that. Based on
that, the struct has been renamed to nvgpu_vm_area to capture its
actual use more accurately.

This also moves all of the vm_area management code to a new file
devoted solely to vm_area management.

Also add a brief overview of the VM architecture. This should help
other people follow the hierarchy of ownership and lifetimes in the
rather complex MM code.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: If85e1cf868031d0dc265e7bed50b58a2aed2602e
Signed-off-by: Alex Waterman
Reviewed-on: http://git-master/r/1477744
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/Makefile.nvgpu          |   1 +
 drivers/gpu/nvgpu/common/linux/ioctl_as.c |   6 +-
 drivers/gpu/nvgpu/common/linux/vm.c       |  14 +-
 drivers/gpu/nvgpu/common/linux/vm_priv.h  |   3 +-
 drivers/gpu/nvgpu/common/mm/vm.c          |  13 +-
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c        | 241 ++----------------------------
 drivers/gpu/nvgpu/gk20a/mm_gk20a.h        |  21 +--
 drivers/gpu/nvgpu/include/nvgpu/as.h      |  10 +-
 drivers/gpu/nvgpu/include/nvgpu/vm.h      |  41 ++++-
 drivers/gpu/nvgpu/include/nvgpu/vm_area.h |  63 ++++++++
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c          |  15 +-
 11 files changed, 146 insertions(+), 282 deletions(-)
 create mode 100644 drivers/gpu/nvgpu/include/nvgpu/vm_area.h

diff --git a/drivers/gpu/nvgpu/Makefile.nvgpu b/drivers/gpu/nvgpu/Makefile.nvgpu
index 2f9d1b36..6bbf49a5 100644
--- a/drivers/gpu/nvgpu/Makefile.nvgpu
+++ b/drivers/gpu/nvgpu/Makefile.nvgpu
@@ -47,6 +47,7 @@ nvgpu-y := \
 	common/mm/lockless_allocator.o \
 	common/mm/gmmu.o \
 	common/mm/vm.o \
+	common/mm/vm_area.o \
 	common/pramin.o \
 	common/semaphore.o \
 	common/as.o \
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
index 7a24a14f..023f8236 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -24,6 +24,7 @@
 #include
 #include
+#include <nvgpu/vm_area.h>

 #include "gk20a/gk20a.h"
 #include "gk20a/platform_gk20a.h"
@@ -56,7 +57,8 @@ static int gk20a_as_ioctl_alloc_space(
 		struct nvgpu_as_alloc_space_args *args)
 {
 	gk20a_dbg_fn("");
-	return gk20a_vm_alloc_space(as_share, args);
+	return nvgpu_vm_area_alloc(as_share->vm, args->pages, args->page_size,
+				   &args->o_a.offset, args->flags);
 }

 static int gk20a_as_ioctl_free_space(
@@ -64,7 +66,7 @@ static int gk20a_as_ioctl_free_space(
 		struct nvgpu_as_free_space_args *args)
 {
 	gk20a_dbg_fn("");
-	return gk20a_vm_free_space(as_share, args);
+	return nvgpu_vm_area_free(as_share->vm, args->offset);
 }

 static int gk20a_as_ioctl_map_buffer_ex(
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index 8b9d6f96..5470d9ee 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -20,6 +20,7 @@
 #include
 #include
 #include
+#include <nvgpu/vm_area.h>
 #include

 #include "gk20a/gk20a.h"
@@ -196,7 +197,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	struct scatterlist *sgl;
 	u64 ctag_map_win_size = 0;
 	u32 ctag_map_win_ctagline = 0;
-	struct vm_reserved_va_node *va_node = NULL;
+	struct nvgpu_vm_area *vm_area = NULL;
 	u32 ctag_offset;
 	enum nvgpu_aperture aperture;
@@ -256,9 +257,8 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,

 	/* Check if we should use a fixed offset for mapping this buffer */
 	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
-		err = validate_fixed_buffer(vm, &bfr,
-					    offset_align, mapping_size,
-					    &va_node);
+		err = nvgpu_vm_area_validate_buffer(vm, offset_align, mapping_size,
+						    bfr.pgsz_idx, &vm_area);

 		if (err)
 			goto clean_up;
@@ -376,10 +376,10 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm,
 	if (user_mapped)
 		vm->num_user_mapped_buffers++;

-	if (va_node) {
+	if (vm_area) {
 		nvgpu_list_add_tail(&mapped_buffer->buffer_list,
-				    &va_node->buffer_list_head);
-		mapped_buffer->va_node = va_node;
+				    &vm_area->buffer_list_head);
+		mapped_buffer->vm_area = vm_area;
 	}

 	nvgpu_mutex_release(&vm->update_gmmu_lock);
diff --git a/drivers/gpu/nvgpu/common/linux/vm_priv.h b/drivers/gpu/nvgpu/common/linux/vm_priv.h
index 9e064d76..14852264 100644
--- a/drivers/gpu/nvgpu/common/linux/vm_priv.h
+++ b/drivers/gpu/nvgpu/common/linux/vm_priv.h
@@ -77,7 +77,7 @@ enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
 int validate_fixed_buffer(struct vm_gk20a *vm,
 			  struct buffer_attrs *bfr,
 			  u64 map_offset, u64 map_size,
-			  struct vm_reserved_va_node **pva_node);
+			  struct nvgpu_vm_area **pva_node);
 int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 				      u32 flags,
 				      struct buffer_attrs *bfr,
@@ -89,6 +89,5 @@ int gk20a_alloc_comptags(struct gk20a *g,
 			 u32 lines, bool user_mappable,
 			 u64 *ctag_map_win_size,
 			 u32 *ctag_map_win_ctagline);
-void gk20a_vm_unmap_locked_kref(struct kref *ref);

 #endif
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 635ac0fb..3bdc905e 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -15,6 +15,7 @@
 #include
+#include <nvgpu/vm_area.h>
 #include
 #include
 #include
@@ -58,7 +59,7 @@ void nvgpu_vm_mapping_batch_finish(struct vm_gk20a *vm,
 void nvgpu_vm_remove_support_nofree(struct vm_gk20a *vm)
 {
 	struct nvgpu_mapped_buf *mapped_buffer;
-	struct vm_reserved_va_node *va_node, *va_node_tmp;
+	struct nvgpu_vm_area *vm_area, *vm_area_tmp;
 	struct nvgpu_rbtree_node *node = NULL;
 	struct gk20a *g = vm->mm->g;
@@ -86,11 +87,11 @@ void nvgpu_vm_remove_support_nofree(struct vm_gk20a *vm)
 	}

 	/* destroy remaining reserved memory areas */
-	nvgpu_list_for_each_entry_safe(va_node, va_node_tmp,
-				       &vm->reserved_va_list,
-				       vm_reserved_va_node, reserved_va_list) {
-		nvgpu_list_del(&va_node->reserved_va_list);
-		nvgpu_kfree(vm->mm->g, va_node);
+	nvgpu_list_for_each_entry_safe(vm_area, vm_area_tmp,
+				       &vm->vm_area_list,
+				       nvgpu_vm_area, vm_area_list) {
+		nvgpu_list_del(&vm_area->vm_area_list);
+		nvgpu_kfree(vm->mm->g, vm_area);
 	}

 	nvgpu_deinit_vm(vm);
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 84919d50..5051f028 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -26,6 +26,7 @@
 #include
 #include
+#include <nvgpu/vm_area.h>
 #include
 #include
 #include
@@ -1065,19 +1066,6 @@ u32 pte_index_from_vaddr(struct vm_gk20a *vm,
 	return ret;
 }

-static struct vm_reserved_va_node *addr_to_reservation(struct vm_gk20a *vm,
-						       u64 addr)
-{
-	struct vm_reserved_va_node *va_node;
-	nvgpu_list_for_each_entry(va_node, &vm->reserved_va_list,
-				  vm_reserved_va_node, reserved_va_list)
-		if (addr >= va_node->vaddr_start &&
-		    addr < (u64)va_node->vaddr_start + (u64)va_node->size)
-			return va_node;
-
-	return NULL;
-}
-
 int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
 			 struct nvgpu_mapped_buf ***mapped_buffers,
 			 int *num_buffers)
@@ -1301,57 +1289,6 @@ int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 	return 0;
 }

-int validate_fixed_buffer(struct vm_gk20a *vm,
-			  struct buffer_attrs *bfr,
-			  u64 map_offset, u64 map_size,
-			  struct vm_reserved_va_node **pva_node)
-{
-	struct gk20a *g = vm->mm->g;
-	struct vm_reserved_va_node *va_node;
-	struct nvgpu_mapped_buf *buffer;
-	u64 map_end = map_offset + map_size;
-
-	/* can wrap around with insane map_size; zero is disallowed too */
-	if (map_end <= map_offset) {
-		nvgpu_warn(g, "fixed offset mapping with invalid map_size");
-		return -EINVAL;
-	}
-
-	if (map_offset & (vm->gmmu_page_sizes[bfr->pgsz_idx] - 1)) {
-		nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx",
-			  map_offset);
-		return -EINVAL;
-	}
-
-	/* Find the space reservation, but it's ok to have none for
-	 * userspace-managed address spaces */
-	va_node = addr_to_reservation(vm, map_offset);
-	if (!va_node && !vm->userspace_managed) {
-		nvgpu_warn(g, "fixed offset mapping without space allocation");
-		return -EINVAL;
-	}
-
-	/* Mapped area should fit inside va, if there's one */
-	if (va_node && map_end > va_node->vaddr_start + va_node->size) {
-		nvgpu_warn(g, "fixed offset mapping size overflows va node");
-		return -EINVAL;
-	}
-
-	/* check that this mapping does not collide with existing
-	 * mappings by checking the buffer with the highest GPU VA
-	 * that is less than our buffer end */
-	buffer = __nvgpu_vm_find_mapped_buf_less_than(
-		vm, map_offset + map_size);
-	if (buffer && buffer->addr + buffer->size > map_offset) {
-		nvgpu_warn(g, "overlapping buffer map requested");
-		return -EINVAL;
-	}
-
-	*pva_node = va_node;
-
-	return 0;
-}
-
 u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 			  u64 map_offset,
 			  struct sg_table *sgt,
@@ -1850,22 +1787,22 @@ int nvgpu_vm_map_compbits(struct vm_gk20a *vm,
 	if (fixed_mapping) {
 		struct buffer_attrs bfr;
 		int err;
-		struct vm_reserved_va_node *va_node = NULL;
+		struct nvgpu_vm_area *vm_area = NULL;

 		memset(&bfr, 0, sizeof(bfr));

 		bfr.pgsz_idx = small_pgsz_index;

-		err = validate_fixed_buffer(
-			vm, &bfr, *compbits_win_gva,
-			mapped_buffer->ctag_map_win_size, &va_node);
+		err = nvgpu_vm_area_validate_buffer(
+			vm, *compbits_win_gva, mapped_buffer->ctag_map_win_size,
+			bfr.pgsz_idx, &vm_area);

 		if (err) {
 			nvgpu_mutex_release(&vm->update_gmmu_lock);
 			return err;
 		}

-		if (va_node) {
+		if (vm_area) {
 			/* this would create a dangling GPU VA
 			 * pointer if the space is freed
 			 * before the buffer is
@@ -2564,8 +2501,8 @@ void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
 			   mapped_buffer->pgsz_idx,
 			   mapped_buffer->va_allocated,
 			   gk20a_mem_flag_none,
-			   mapped_buffer->va_node ?
-			   mapped_buffer->va_node->sparse : false,
+			   mapped_buffer->vm_area ?
+			   mapped_buffer->vm_area->sparse : false,
 			   batch);

 	gk20a_dbg(gpu_dbg_map,
@@ -2712,13 +2649,13 @@ int gk20a_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size)
 enum gmmu_pgsz_gk20a __get_pte_size_fixed_map(struct vm_gk20a *vm,
 					      u64 base, u64 size)
 {
-	struct vm_reserved_va_node *node;
+	struct nvgpu_vm_area *vm_area;

-	node = addr_to_reservation(vm, base);
-	if (!node)
+	vm_area = nvgpu_vm_area_find(vm, base);
+	if (!vm_area)
 		return gmmu_page_size_small;

-	return node->pgsz_idx;
+	return vm_area->pgsz_idx;
 }

 /*
@@ -3012,7 +2949,7 @@ int nvgpu_init_vm(struct mm_gk20a *mm,
 	nvgpu_mutex_init(&vm->update_gmmu_lock);
 	kref_init(&vm->ref);
-	nvgpu_init_list_node(&vm->reserved_va_list);
+	nvgpu_init_list_node(&vm->vm_area_list);

 	/*
 	 * This is only necessary for channel address spaces. The best way to
@@ -3100,158 +3037,6 @@ int gk20a_vm_release_share(struct gk20a_as_share *as_share)
 	return 0;
 }

-int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
-			 struct nvgpu_as_alloc_space_args *args)
-
-{
-	int err = -ENOMEM;
-	int pgsz_idx = gmmu_page_size_small;
-	struct nvgpu_allocator *vma;
-	struct vm_gk20a *vm = as_share->vm;
-	struct gk20a *g = vm->mm->g;
-	struct vm_reserved_va_node *va_node;
-	u64 vaddr_start = 0;
-	int page_sizes = gmmu_nr_page_sizes;
-
-	gk20a_dbg_fn("flags=0x%x pgsz=0x%x nr_pages=0x%x o/a=0x%llx",
-		     args->flags, args->page_size, args->pages,
-		     args->o_a.offset);
-
-	if (!vm->big_pages)
-		page_sizes--;
-
-	for (; pgsz_idx < page_sizes; pgsz_idx++) {
-		if (vm->gmmu_page_sizes[pgsz_idx] == args->page_size)
-			break;
-	}
-
-	if (pgsz_idx >= page_sizes) {
-		err = -EINVAL;
-		goto clean_up;
-	}
-
-	va_node = nvgpu_kzalloc(g, sizeof(*va_node));
-	if (!va_node) {
-		err = -ENOMEM;
-		goto clean_up;
-	}
-
-	vma = vm->vma[pgsz_idx];
-	if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET)
-		vaddr_start = nvgpu_alloc_fixed(vma, args->o_a.offset,
-						(u64)args->pages *
-						(u64)args->page_size,
-						args->page_size);
-	else
-		vaddr_start = nvgpu_alloc(vma,
-					  (u64)args->pages *
-					  (u64)args->page_size);
-
-	if (!vaddr_start) {
-		nvgpu_kfree(g, va_node);
-		goto clean_up;
-	}
-
-	va_node->vaddr_start = vaddr_start;
-	va_node->size = (u64)args->page_size * (u64)args->pages;
-	va_node->pgsz_idx = pgsz_idx;
-	nvgpu_init_list_node(&va_node->buffer_list_head);
-	nvgpu_init_list_node(&va_node->reserved_va_list);
-
-	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-
-	/* mark that we need to use sparse mappings here */
-	if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE) {
-		u64 map_offset = g->ops.mm.gmmu_map(vm, vaddr_start,
-						    NULL,
-						    0,
-						    va_node->size,
-						    pgsz_idx,
-						    0,
-						    0,
-						    args->flags,
-						    gk20a_mem_flag_none,
-						    false,
-						    true,
-						    false,
-						    NULL,
-						    APERTURE_INVALID);
-		if (!map_offset) {
-			nvgpu_mutex_release(&vm->update_gmmu_lock);
-			nvgpu_free(vma, vaddr_start);
-			nvgpu_kfree(g, va_node);
-			goto clean_up;
-		}
-
-		va_node->sparse = true;
-	}
-	nvgpu_list_add_tail(&va_node->reserved_va_list, &vm->reserved_va_list);
-
-	nvgpu_mutex_release(&vm->update_gmmu_lock);
-
-	args->o_a.offset = vaddr_start;
-	err = 0;
-
-clean_up:
-	return err;
-}
-
-int gk20a_vm_free_space(struct gk20a_as_share *as_share,
-			struct nvgpu_as_free_space_args *args)
-{
-	int err = -ENOMEM;
-	int pgsz_idx;
-	struct nvgpu_allocator *vma;
-	struct vm_gk20a *vm = as_share->vm;
-	struct vm_reserved_va_node *va_node;
-	struct gk20a *g = gk20a_from_vm(vm);
-
-	gk20a_dbg_fn("pgsz=0x%x nr_pages=0x%x o/a=0x%llx", args->page_size,
-		     args->pages, args->offset);
-
-	/* determine pagesz idx */
-	pgsz_idx = __get_pte_size(vm, args->offset,
-				  args->page_size * args->pages);
-
-	vma = vm->vma[pgsz_idx];
-	nvgpu_free(vma, args->offset);
-
-	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-	va_node = addr_to_reservation(vm, args->offset);
-	if (va_node) {
-		struct nvgpu_mapped_buf *buffer, *n;
-
-		/* Decrement the ref count on all buffers in this va_node. This
-		 * allows userspace to let the kernel free mappings that are
-		 * only used by this va_node.
-		 */
-		nvgpu_list_for_each_entry_safe(buffer, n,
-					       &va_node->buffer_list_head,
-					       nvgpu_mapped_buf, buffer_list) {
-			nvgpu_list_del(&buffer->buffer_list);
-			kref_put(&buffer->ref, gk20a_vm_unmap_locked_kref);
-		}
-
-		nvgpu_list_del(&va_node->reserved_va_list);
-
-		/* if this was a sparse mapping, free the va */
-		if (va_node->sparse)
-			g->ops.mm.gmmu_unmap(vm,
-					     va_node->vaddr_start,
-					     va_node->size,
-					     va_node->pgsz_idx,
-					     true,
-					     gk20a_mem_flag_none,
-					     true,
-					     NULL);
-		nvgpu_kfree(g, va_node);
-	}
-	nvgpu_mutex_release(&vm->update_gmmu_lock);
-	err = 0;
-
-	return err;
-}
-
 int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch)
 {
 	int err = 0;
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 357962c7..6ddf842a 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -140,22 +140,6 @@ struct priv_cmd_entry {
 	u32 size;	/* in words */
 };

-struct vm_reserved_va_node {
-	struct nvgpu_list_node reserved_va_list;
-	struct nvgpu_list_node buffer_list_head;
-	u32 pgsz_idx;
-	u64 vaddr_start;
-	u64 size;
-	bool sparse;
-};
-
-static inline struct vm_reserved_va_node *
-vm_reserved_va_node_from_reserved_va_list(struct nvgpu_list_node *node)
-{
-	return (struct vm_reserved_va_node *)
-		((uintptr_t)node - offsetof(struct vm_reserved_va_node, reserved_va_list));
-};
-
 struct gk20a;
 struct channel_gk20a;
@@ -442,10 +426,6 @@ struct nvgpu_as_free_space_args;
 int gk20a_vm_alloc_share(struct gk20a_as_share *as_share, u32 big_page_size,
 			 u32 flags);
 int gk20a_vm_release_share(struct gk20a_as_share *as_share);
-int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
-			 struct nvgpu_as_alloc_space_args *args);
-int gk20a_vm_free_space(struct gk20a_as_share *as_share,
-			struct nvgpu_as_free_space_args *args);
 int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
 			  struct channel_gk20a *ch);
 int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch);
@@ -491,5 +471,6 @@ extern const struct gk20a_mmu_level gk20a_mm_levels_128k[];
 int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd, u64 *buffer_id,
 			     u64 *buffer_len);
+void gk20a_vm_unmap_locked_kref(struct kref *ref);

 #endif /* MM_GK20A_H */
diff --git a/drivers/gpu/nvgpu/include/nvgpu/as.h b/drivers/gpu/nvgpu/include/nvgpu/as.h
index 0e784396..e3233f87 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/as.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/as.h
@@ -17,14 +17,14 @@

 struct vm_gk20a;

+struct gk20a_as {
+	int last_share_id; /* dummy allocator for now */
+};
+
 struct gk20a_as_share {
 	struct gk20a_as *as;
-	int id;
 	struct vm_gk20a *vm;
-};
-
-struct gk20a_as {
-	int last_share_id; /* dummy allocator for now */
+	int id;
 };

 int gk20a_as_release_share(struct gk20a_as_share *as_share);
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index e1ceffd4..69c08c77 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -26,11 +26,10 @@
 #include

 struct vm_gk20a;
-struct vm_reserved_va_node;
+struct nvgpu_vm_area;
 struct buffer_attrs;
 struct gk20a_comptag_allocator;
-
 /**
  * This header contains the OS agnostic APIs for dealing with VMs. Most of the
  * VM implementation is system specific - it must translate from a platform's
@@ -39,6 +38,38 @@ struct gk20a_comptag_allocator;
  * However, some stuff is platform agnostic. VM ref-counting and the VM struct
  * itself are platform agnostic. Also, the initialization and destruction of
  * VMs is the same across all platforms (for now).
+ *
+ * VM Architecture:
+ * ----------------
+ *
+ * The VM management in nvgpu is split up as follows: a vm_gk20a struct defines
+ * an address space. Each address space is a set of page tables and a GPU
+ * Virtual Address (GVA) allocator. Any number of channels may bind to a VM.
+ *
+ *  +----+  +----+     +----+     +-----+     +-----+
+ *  | C1 |  | C2 | ... | Cn |     | VM1 | ... | VMn |
+ *  +-+--+  +-+--+     +-+--+     +--+--+     +--+--+
+ *    |       |          |           |           |
+ *    |       |          +----->-----+           |
+ *    |       +---------------->-----+           |
+ *    +------------------------>-----------------+
+ *
+ * Each VM also manages a set of mapped buffers (struct nvgpu_mapped_buf)
+ * which correspond to _user space_ buffers that have been mapped into this VM.
+ * Kernel space mappings (created by nvgpu_gmmu_map()) are not tracked by VMs.
+ * This may be an architectural bug, but for now it seems to be OK. VMs can be
+ * closed in various ways - ref counts hitting zero, direct calls to the remove
+ * routine, etc. Note: this is going to change. VM cleanup is going to be
+ * homogenized around ref-counts. When a VM is closed, all mapped buffers in
+ * the VM are unmapped from the GMMU. This means that those mappings will no
+ * longer be valid and any subsequent access by the GPU will fault. That means
+ * one must ensure the VM is not in use before closing it.
+ *
+ * VMs may also contain VM areas (struct nvgpu_vm_area) which are created for
+ * the purpose of sparse and/or fixed mappings. If userspace wishes to create a
+ * fixed mapping it must first create a VM area - either with a fixed address
+ * or not. VM areas are reserved - other mapping operations will not use the
+ * space. Userspace may then create fixed mappings within that VM area.
  */

 /* map/unmap batch state */
@@ -49,9 +80,10 @@ struct vm_gk20a_mapping_batch {

 struct nvgpu_mapped_buf {
 	struct vm_gk20a *vm;
+	struct nvgpu_vm_area *vm_area;
+
 	struct nvgpu_rbtree_node node;
 	struct nvgpu_list_node buffer_list;
-	struct vm_reserved_va_node *va_node;
 	u64 addr;
 	u64 size;
 	struct dma_buf *dmabuf;
@@ -102,7 +134,6 @@ struct vm_gk20a {
 	bool big_pages;   /* enable large page support */
 	bool enable_ctag;
-	bool mapped;

 	u32 big_page_size;
@@ -129,7 +160,7 @@ struct vm_gk20a {

 	struct nvgpu_rbtree_node *mapped_buffers;

-	struct nvgpu_list_node reserved_va_list;
+	struct nvgpu_list_node vm_area_list;

 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
 	u64 handle;
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm_area.h b/drivers/gpu/nvgpu/include/nvgpu/vm_area.h
new file mode 100644
index 00000000..ffe4b99b
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm_area.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVGPU_VM_AREA_H__
+#define __NVGPU_VM_AREA_H__
+
+#include <nvgpu/list.h>
+#include <nvgpu/types.h>
+
+struct vm_gk20a;
+struct gk20a_as_share;
+struct nvgpu_as_alloc_space_args;
+struct nvgpu_as_free_space_args;
+
+struct nvgpu_vm_area {
+	/*
+	 * Entry into the list of VM areas owned by a VM.
+	 */
+	struct nvgpu_list_node vm_area_list;
+
+	/*
+	 * List of buffers mapped into this vm_area.
+	 */
+	struct nvgpu_list_node buffer_list_head;
+
+	u32 flags;
+	u32 pgsz_idx;
+	u64 addr;
+	u64 size;
+	bool sparse;
+};
+
+static inline struct nvgpu_vm_area *
+nvgpu_vm_area_from_vm_area_list(struct nvgpu_list_node *node)
+{
+	return (struct nvgpu_vm_area *)
+		((uintptr_t)node - offsetof(struct nvgpu_vm_area,
+					    vm_area_list));
+};
+
+int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
+			u64 *addr, u32 flags);
+int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr);
+
+struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr);
+int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
+				  u64 map_offset, u64 map_size, int pgsz_idx,
+				  struct nvgpu_vm_area **pvm_area);
+
+#endif
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index f4004f42..b42fbcb3 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <nvgpu/vm_area.h>

 #include "vgpu/vgpu.h"
 #include "gk20a/mm_gk20a.h"
@@ -203,7 +204,7 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 {
 	struct gk20a *g = vm->mm->g;
 	struct nvgpu_mapped_buf *mapped_buffer;
-	struct vm_reserved_va_node *va_node, *va_node_tmp;
+	struct nvgpu_vm_area *vm_area, *vm_area_tmp;
 	struct tegra_vgpu_cmd_msg msg;
 	struct tegra_vgpu_as_share_params *p = &msg.params.as_share;
 	struct nvgpu_rbtree_node *node = NULL;
@@ -223,11 +224,11 @@ static void vgpu_vm_remove_support(struct vm_gk20a *vm)
 	}

 	/* destroy remaining reserved memory areas */
-	nvgpu_list_for_each_entry_safe(va_node, va_node_tmp,
-				       &vm->reserved_va_list,
-				       vm_reserved_va_node, reserved_va_list) {
-		nvgpu_list_del(&va_node->reserved_va_list);
-		nvgpu_kfree(g, va_node);
+	nvgpu_list_for_each_entry_safe(vm_area, vm_area_tmp,
+				       &vm->vm_area_list,
+				       nvgpu_vm_area, vm_area_list) {
+		nvgpu_list_del(&vm_area->vm_area_list);
+		nvgpu_kfree(g, vm_area);
 	}

 	msg.cmd = TEGRA_VGPU_CMD_AS_FREE_SHARE;
@@ -413,7 +414,7 @@ static int vgpu_vm_alloc_share(struct gk20a_as_share *as_share,
 	nvgpu_mutex_init(&vm->update_gmmu_lock);
 	kref_init(&vm->ref);
-	nvgpu_init_list_node(&vm->reserved_va_list);
+	nvgpu_init_list_node(&vm->vm_area_list);

 	vm->enable_ctag = true;
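
The new common/mm/vm_area.c itself is not shown in this excerpt. Its behavior can largely be inferred from the code removed above; for example, nvgpu_vm_area_find() is presumably just the old addr_to_reservation() with the struct, list, and fields renamed as in this patch. A sketch under that assumption:

    /*
     * Hypothetical reconstruction of nvgpu_vm_area_find() -- not part of
     * this diff. Assumes the renames shown above: vm_reserved_va_node ->
     * nvgpu_vm_area, reserved_va_list -> vm_area_list, vaddr_start -> addr.
     */
    struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr)
    {
    	struct nvgpu_vm_area *vm_area;

    	/* Linear walk of the VM's reserved areas; the list is expected
    	 * to be short. */
    	nvgpu_list_for_each_entry(vm_area, &vm->vm_area_list,
    				  nvgpu_vm_area, vm_area_list) {
    		if (addr >= vm_area->addr &&
    		    addr < (u64)vm_area->addr + (u64)vm_area->size)
    			return vm_area;
    	}

    	return NULL;
    }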
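
Similarly, nvgpu_vm_area_validate_buffer() appears to be the old validate_fixed_buffer() with the buffer_attrs argument replaced by an explicit page-size index, matching the new prototype in vm_area.h. A plausible sketch, again not taken from this diff:

    /*
     * Hypothetical reconstruction based on the removed validate_fixed_buffer().
     */
    int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
    				  u64 map_offset, u64 map_size, int pgsz_idx,
    				  struct nvgpu_vm_area **pvm_area)
    {
    	struct gk20a *g = vm->mm->g;
    	struct nvgpu_vm_area *vm_area;
    	struct nvgpu_mapped_buf *buffer;
    	u64 map_end = map_offset + map_size;

    	/* Reject zero-sized and wrapping ranges. */
    	if (map_end <= map_offset) {
    		nvgpu_warn(g, "fixed offset mapping with invalid map_size");
    		return -EINVAL;
    	}

    	/* The offset must be aligned to the requested page size. */
    	if (map_offset & (vm->gmmu_page_sizes[pgsz_idx] - 1)) {
    		nvgpu_err(g, "map offset must be buffer page size aligned 0x%llx",
    			  map_offset);
    		return -EINVAL;
    	}

    	/* A reservation is optional only for userspace-managed VMs. */
    	vm_area = nvgpu_vm_area_find(vm, map_offset);
    	if (!vm_area && !vm->userspace_managed) {
    		nvgpu_warn(g, "fixed offset mapping without space allocation");
    		return -EINVAL;
    	}

    	/* The mapping must fit entirely inside the VM area, if any. */
    	if (vm_area && map_end > vm_area->addr + vm_area->size) {
    		nvgpu_warn(g, "fixed offset mapping size overflows va node");
    		return -EINVAL;
    	}

    	/* Check for collisions by looking at the mapped buffer with the
    	 * highest GPU VA that is below our end address. */
    	buffer = __nvgpu_vm_find_mapped_buf_less_than(vm, map_end);
    	if (buffer && buffer->addr + buffer->size > map_offset) {
    		nvgpu_warn(g, "overlapping buffer map requested");
    		return -EINVAL;
    	}

    	*pvm_area = vm_area;

    	return 0;
    }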
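
For reference, a minimal, hypothetical in-kernel usage of the new API, mirroring what the ALLOC_SPACE/FREE_SPACE ioctl paths above do. SZ_4K comes from <linux/sizes.h>; vm is assumed to be a valid struct vm_gk20a pointer, and error handling is trimmed:

    /* Reserve a 64-page VM area of 4 KiB pages at an allocator-chosen
     * address (flags == 0: neither fixed-offset nor sparse). */
    u64 offset = 0;
    int err;

    err = nvgpu_vm_area_alloc(vm, 64, SZ_4K, &offset, 0);
    if (err)
    	return err;

    /*
     * Fixed mappings may now be created inside
     * [offset, offset + 64 * SZ_4K); other mapping operations will not
     * use this range.
     */

    err = nvgpu_vm_area_free(vm, offset);

Note the lifetime rule encoded in nvgpu_vm_area_free() (visible in the removed gk20a_vm_free_space() above): freeing a VM area drops the ref on every buffer still mapped inside it, so userspace can rely on the kernel to tear down mappings that exist only for that area.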