commit 29cc82844e03b6f9f0e6801169b6fa0e72d56628
tree   f616b6c651ce80765ee344aa33ca204c555e67f2
parent 014ace5a85f274de7debb4c6168d69c803445e19
author    Alex Waterman <alexw@nvidia.com>  2017-04-25 18:56:12 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2017-05-19 18:34:12 -0400
gpu: nvgpu: Split vm_area management into vm code
The vm_reserve_va_node struct is essentially a special VM area that
can be used for sparse mappings and fixed mappings. The name of this
struct is somewhat confusing, since "node" is typically used for list
items; though this struct is part of a list, it is much more than a
list item. The struct has therefore been renamed to nvgpu_vm_area to
capture its actual use more accurately.

This also moves all of the management code for VM areas into a new
file devoted solely to vm_area management.

Also add a brief overview of the VM architecture. This should help
other people follow along the hierarchy of ownership and lifetimes
in the rather complex MM code.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: If85e1cf868031d0dc265e7bed50b58a2aed2602e
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477744
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
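As a quick orientation to the API introduced below, here is a minimal
usage sketch. It is hedged: the wrapper function is hypothetical, the
page count, page size, and flags value (0, i.e. no fixed address) are
illustrative, and only the nvgpu_vm_area_* calls come from the new
nvgpu/vm_area.h header in this patch.

#include <nvgpu/vm_area.h>

/*
 * Hypothetical sketch, not part of this patch: reserve a VM area,
 * look it up again, then release it. Assumes a valid vm pointer.
 */
static int vm_area_lifecycle_sketch(struct vm_gk20a *vm)
{
	struct nvgpu_vm_area *vm_area;
	u64 addr = 0;
	int err;

	/* Reserve 64 pages of 4K GVA space; addr receives the base. */
	err = nvgpu_vm_area_alloc(vm, 64, 4096, &addr, 0);
	if (err)
		return err;

	/* The reservation can later be looked up by address. */
	vm_area = nvgpu_vm_area_find(vm, addr);
	if (!vm_area)
		return -EINVAL;

	/* Release the reservation. */
	return nvgpu_vm_area_free(vm, addr);
}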
Diffstat (limited to 'drivers/gpu/nvgpu/include')
 drivers/gpu/nvgpu/include/nvgpu/as.h      | 10
 drivers/gpu/nvgpu/include/nvgpu/vm.h      | 41
 drivers/gpu/nvgpu/include/nvgpu/vm_area.h | 63
 3 files changed, 104 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/as.h b/drivers/gpu/nvgpu/include/nvgpu/as.h
index 0e784396..e3233f87 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/as.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/as.h
@@ -17,14 +17,14 @@
 
 struct vm_gk20a;
 
+struct gk20a_as {
+	int last_share_id; /* dummy allocator for now */
+};
+
 struct gk20a_as_share {
 	struct gk20a_as *as;
-	int id;
 	struct vm_gk20a *vm;
-};
-
-struct gk20a_as {
-	int last_share_id; /* dummy allocator for now */
+	int id;
 };
 
 int gk20a_as_release_share(struct gk20a_as_share *as_share);
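The reordering above also reads more naturally: gk20a_as is now defined
before gk20a_as_share, which points back at it. A hypothetical sketch of
that relationship, not taken from this patch (the function and its name
are invented for illustration; the fields and the "dummy allocator"
comment are from the header itself):

/*
 * Hypothetical illustration only: tie a share to its parent address
 * space and hand out an id from the trivial last_share_id counter.
 */
static void gk20a_as_share_init_sketch(struct gk20a_as_share *share,
				       struct gk20a_as *as,
				       struct vm_gk20a *vm)
{
	share->as = as;
	share->vm = vm;
	share->id = ++as->last_share_id; /* dummy allocator for now */
}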
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index e1ceffd4..69c08c77 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -26,11 +26,10 @@
 #include <nvgpu/allocator.h>
 
 struct vm_gk20a;
-struct vm_reserved_va_node;
+struct nvgpu_vm_area;
 struct buffer_attrs;
 struct gk20a_comptag_allocator;
 
-
 /**
  * This header contains the OS agnostic APIs for dealing with VMs. Most of the
  * VM implementation is system specific - it must translate from a platform's
@@ -39,6 +38,38 @@ struct gk20a_comptag_allocator;
  * However, some stuff is platform agnostic. VM ref-counting and the VM struct
  * itself are platform agnostic. Also, the initialization and destruction of
  * VMs is the same across all platforms (for now).
+ *
+ * VM Architecture:
+ * ----------------
+ *
+ * The VM management in nvgpu is split up as follows: a vm_gk20a struct
+ * defines an address space. Each address space is a set of page tables and a
+ * GPU Virtual Address (GVA) allocator. Any number of channels may bind to a VM.
+ *
+ *     +----+  +----+     +----+     +-----+     +-----+
+ *     | C1 |  | C2 | ... | Cn |     | VM1 | ... | VMn |
+ *     +-+--+  +-+--+     +-+--+     +--+--+     +--+--+
+ *       |       |          |           |           |
+ *       |       |          +----->-----+           |
+ *       |       +---------------->-----+           |
+ *       +------------------------>-----------------+
+ *
+ * Each VM also manages a set of mapped buffers (struct nvgpu_mapped_buf),
+ * which correspond to _user space_ buffers that have been mapped into this VM.
+ * Kernel space mappings (created by nvgpu_gmmu_map()) are not tracked by VMs.
+ * This may be an architectural bug, but for now it seems to be OK. VMs can be
+ * closed in various ways - ref-counts hitting zero, direct calls to the remove
+ * routine, etc. Note: this is going to change. VM cleanup is going to be
+ * homogenized around ref-counts. When a VM is closed all mapped buffers in the
+ * VM are unmapped from the GMMU. This means that those mappings will no longer
+ * be valid and any subsequent access by the GPU will fault. That means one must
+ * ensure the VM is not in use before closing it.
+ *
+ * VMs may also contain VM areas (struct nvgpu_vm_area) which are created for
+ * the purpose of sparse and/or fixed mappings. If userspace wishes to create a
+ * fixed mapping it must first create a VM area - either with a fixed address or
+ * not. VM areas are reserved - other mapping operations will not use the space.
+ * Userspace may then create fixed mappings within that VM area.
  */
 
 /* map/unmap batch state */
@@ -49,9 +80,10 @@ struct vm_gk20a_mapping_batch {
 
 struct nvgpu_mapped_buf {
 	struct vm_gk20a *vm;
+	struct nvgpu_vm_area *vm_area;
+
 	struct nvgpu_rbtree_node node;
 	struct nvgpu_list_node buffer_list;
-	struct vm_reserved_va_node *va_node;
 	u64 addr;
 	u64 size;
 	struct dma_buf *dmabuf;
@@ -102,7 +134,6 @@ struct vm_gk20a {
 
 	bool big_pages; /* enable large page support */
 	bool enable_ctag;
-	bool mapped;
 
 	u32 big_page_size;
 
@@ -129,7 +160,7 @@ struct vm_gk20a {
 
 	struct nvgpu_rbtree_node *mapped_buffers;
 
-	struct nvgpu_list_node reserved_va_list;
+	struct nvgpu_list_node vm_area_list;
 
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
 	u64 handle;
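The architecture comment above says fixed mappings must land inside a
previously reserved VM area. Below is a hedged sketch of how a map path
might consult the new API before touching the GMMU; only
nvgpu_vm_area_validate_buffer() (declared in the new header that
follows) is from this patch, and the surrounding function is invented
for illustration.

/*
 * Hypothetical sketch: validate a fixed mapping request against the
 * VM areas reserved in this VM before programming the GMMU.
 */
static int validate_fixed_mapping_sketch(struct vm_gk20a *vm,
					 u64 map_offset, u64 map_size,
					 int pgsz_idx)
{
	struct nvgpu_vm_area *vm_area;
	int err;

	/*
	 * Expected to fail unless the requested range lies within a
	 * reserved VM area compatible with pgsz_idx.
	 */
	err = nvgpu_vm_area_validate_buffer(vm, map_offset, map_size,
					    pgsz_idx, &vm_area);
	if (err)
		return err;

	/* ... program the GMMU within the reserved range ... */
	return 0;
}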
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm_area.h b/drivers/gpu/nvgpu/include/nvgpu/vm_area.h
new file mode 100644
index 00000000..ffe4b99b
--- /dev/null
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm_area.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __NVGPU_VM_AREA_H__
+#define __NVGPU_VM_AREA_H__
+
+#include <nvgpu/list.h>
+#include <nvgpu/types.h>
+
+struct vm_gk20a;
+struct gk20a_as_share;
+struct nvgpu_as_alloc_space_args;
+struct nvgpu_as_free_space_args;
+
+struct nvgpu_vm_area {
+	/*
+	 * Entry into the list of VM areas owned by a VM.
+	 */
+	struct nvgpu_list_node vm_area_list;
+
+	/*
+	 * List of buffers mapped into this vm_area.
+	 */
+	struct nvgpu_list_node buffer_list_head;
+
+	u32 flags;
+	u32 pgsz_idx;
+	u64 addr;
+	u64 size;
+	bool sparse;
+};
+
+static inline struct nvgpu_vm_area *
+nvgpu_vm_area_from_vm_area_list(struct nvgpu_list_node *node)
+{
+	return (struct nvgpu_vm_area *)
+		((uintptr_t)node - offsetof(struct nvgpu_vm_area,
+					    vm_area_list));
+}
+
+int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size,
+			u64 *addr, u32 flags);
+int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr);
+
+struct nvgpu_vm_area *nvgpu_vm_area_find(struct vm_gk20a *vm, u64 addr);
+int nvgpu_vm_area_validate_buffer(struct vm_gk20a *vm,
+				  u64 map_offset, u64 map_size, int pgsz_idx,
+				  struct nvgpu_vm_area **pvm_area);
+
+#endif
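Finally, the nvgpu_vm_area_from_vm_area_list() helper above is the
usual intrusive-list "container_of" idiom. A hedged sketch of walking a
VM's area list with it, assuming struct nvgpu_list_node exposes a next
pointer in a circular list, as intrusive list nodes typically do (the
walk function itself is invented; the vm_area_list field on vm_gk20a
comes from the vm.h change above):

/*
 * Hypothetical sketch: visit every VM area reserved in a VM.
 */
static void walk_vm_areas_sketch(struct vm_gk20a *vm)
{
	struct nvgpu_list_node *node;

	/* Assumption: nvgpu_list_node is a circular list with ->next. */
	for (node = vm->vm_area_list.next;
	     node != &vm->vm_area_list;
	     node = node->next) {
		struct nvgpu_vm_area *vm_area =
			nvgpu_vm_area_from_vm_area_list(node);

		/* vm_area->addr and vm_area->size give the GVA range. */
		(void)vm_area;
	}
}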