author     Alex Waterman <alexw@nvidia.com>  2017-04-24 18:26:00 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-05-19 18:34:06 -0400
commit     014ace5a85f274de7debb4c6168d69c803445e19 (patch)
tree       4028be3294b95e38659f1ebba4a14457748e59f1 /drivers/gpu/nvgpu/include
parent     d37e8f7dcf190f31f9c0c12583db2bb0c0d313c0 (diff)
gpu: nvgpu: Split VM implementation out
This patch begins splitting the VM implementation out of mm_gk20a.c and
moves it to common/linux/vm.c and common/mm/vm.c. This split is
necessary because the VM code has two portions: first, an interface for
the OS specific code to use (i.e. userspace mappings), and second, a set
of APIs for the driver to use (init, cleanup, etc.) which are not OS
specific.

This is only the beginning of the split; there is still a lot that needs
to be carefully moved around.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: I3b57cba245d7daf9e4326a143b9c6217e0f28c96
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477743
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
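As a rough illustration of the two-layer split the commit message describes, the sketch below models the boundary in plain C. Every name in it (core_vm, os_buffer, core_vm_init, linux_vm_map) is a hypothetical stand-in invented for this example; the real code uses vm_gk20a, dma_buf, and the nvgpu_* APIs visible in the diff below. This is a minimal sketch of the intended layering, not code from the patch.

#include <stdio.h>

/* Hypothetical stand-ins: core_vm plays the role of vm_gk20a,
 * os_buffer plays the role of a Linux dma_buf. */
struct core_vm   { int id; };
struct os_buffer { const char *name; };

/* common/mm/vm.c style: OS-agnostic driver logic (init, cleanup, ...).
 * Nothing here may depend on Linux-only types. */
static int core_vm_init(struct core_vm *vm, int id)
{
	vm->id = id;
	return 0;
}

/* common/linux/vm.c style: OS glue for userspace mappings. It is the
 * only layer that owns the OS-specific type, and it translates that
 * type before calling into the core. */
static int linux_vm_map(struct core_vm *vm, struct os_buffer *buf)
{
	printf("mapping %s into vm %d\n", buf->name, vm->id);
	return 0;
}

int main(void)
{
	struct core_vm vm;
	struct os_buffer buf = { .name = "example" };

	core_vm_init(&vm, 1);
	return linux_vm_map(&vm, &buf);
}

The point of the boundary is that the common/mm half never names an OS type and so can be built for any OS, while the common/linux half is the only place that touches Linux-specific structures.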
Diffstat (limited to 'drivers/gpu/nvgpu/include')
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/vm.h | 78
1 file changed, 74 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h
index 1fb772d5..e1ceffd4 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/vm.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h
@@ -26,7 +26,10 @@
 #include <nvgpu/allocator.h>
 
 struct vm_gk20a;
-struct mapped_buffer_node;
+struct vm_reserved_va_node;
+struct buffer_attrs;
+struct gk20a_comptag_allocator;
+
 
 /**
  * This header contains the OS agnostic APIs for dealing with VMs. Most of the
@@ -44,6 +47,50 @@ struct vm_gk20a_mapping_batch {
 	bool need_tlb_invalidate;
 };
 
+struct nvgpu_mapped_buf {
+	struct vm_gk20a *vm;
+	struct nvgpu_rbtree_node node;
+	struct nvgpu_list_node buffer_list;
+	struct vm_reserved_va_node *va_node;
+	u64 addr;
+	u64 size;
+	struct dma_buf *dmabuf;
+	struct sg_table *sgt;
+	struct kref ref;
+	u32 user_mapped;
+	bool own_mem_ref;
+	u32 pgsz_idx;
+	u32 ctag_offset;
+	u32 ctag_lines;
+	u32 ctag_allocated_lines;
+
+	/* For comptag mapping, these are the mapping window parameters */
+	bool ctags_mappable;
+	u64 ctag_map_win_addr; /* non-zero if mapped */
+	u64 ctag_map_win_size; /* non-zero if ctags_mappable */
+	u32 ctag_map_win_ctagline; /* ctagline at win start, set if
+				    * ctags_mappable */
+
+	u32 flags;
+	u32 kind;
+	bool va_allocated;
+};
+
+static inline struct nvgpu_mapped_buf *
+nvgpu_mapped_buf_from_buffer_list(struct nvgpu_list_node *node)
+{
+	return (struct nvgpu_mapped_buf *)
+		((uintptr_t)node - offsetof(struct nvgpu_mapped_buf,
+					    buffer_list));
+}
+
+static inline struct nvgpu_mapped_buf *
+mapped_buffer_from_rbtree_node(struct nvgpu_rbtree_node *node)
+{
+	return (struct nvgpu_mapped_buf *)
+		((uintptr_t)node - offsetof(struct nvgpu_mapped_buf, node));
+}
+
 struct vm_gk20a {
 	struct mm_gk20a *mm;
 	struct gk20a_as_share *as_share; /* as_share this represents */
@@ -102,6 +149,8 @@ struct vm_gk20a {
 void nvgpu_vm_get(struct vm_gk20a *vm);
 void nvgpu_vm_put(struct vm_gk20a *vm);
 
+int vm_aspace_id(struct vm_gk20a *vm);
+
 /* batching eliminates redundant cache flushes and invalidates */
 void nvgpu_vm_mapping_batch_start(struct vm_gk20a_mapping_batch *batch);
 void nvgpu_vm_mapping_batch_finish(
@@ -112,24 +161,45 @@ void nvgpu_vm_mapping_batch_finish_locked(
 
 /* get reference to all currently mapped buffers */
 int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
-			 struct mapped_buffer_node ***mapped_buffers,
+			 struct nvgpu_mapped_buf ***mapped_buffers,
 			 int *num_buffers);
 
 /* put references on the given buffers */
 void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
-			  struct mapped_buffer_node **mapped_buffers,
+			  struct nvgpu_mapped_buf **mapped_buffers,
 			  int num_buffers);
 
 /* Note: batch may be NULL if unmap op is not part of a batch */
 int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
 			  struct vm_gk20a_mapping_batch *batch);
 
-void nvgpu_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer,
+void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
 			   struct vm_gk20a_mapping_batch *batch);
 
+/*
+ * These all require the VM update lock to be held.
+ */
+struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf(
+	struct vm_gk20a *vm, u64 addr);
+struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_range(
+	struct vm_gk20a *vm, u64 addr);
+struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than(
+	struct vm_gk20a *vm, u64 addr);
+
+int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
+		      struct dma_buf **dmabuf,
+		      u64 *offset);
+
+int nvgpu_insert_mapped_buf(struct vm_gk20a *vm,
+			    struct nvgpu_mapped_buf *mapped_buffer);
+void nvgpu_remove_mapped_buf(struct vm_gk20a *vm,
+			     struct nvgpu_mapped_buf *mapped_buffer);
+
 void nvgpu_vm_remove_support_nofree(struct vm_gk20a *vm);
 void nvgpu_vm_remove_support(struct vm_gk20a *vm);
 
+void nvgpu_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block);
+
 int nvgpu_init_vm(struct mm_gk20a *mm,
 		  struct vm_gk20a *vm,
 		  u32 big_page_size,
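A note on the two static inline helpers added above: they hand-roll the classic container_of pattern, recovering the enclosing nvgpu_mapped_buf from a pointer to a node embedded inside it by subtracting that member's offsetof(). The userspace sketch below demonstrates the same technique with hypothetical stand-in types (list_node for nvgpu_list_node, mapped_buf for nvgpu_mapped_buf); it is illustrative only, not code from the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical intrusive node type, standing in for nvgpu_list_node. */
struct list_node {
	struct list_node *next, *prev;
};

/* Hypothetical container, standing in for nvgpu_mapped_buf. */
struct mapped_buf {
	uint64_t addr;
	struct list_node buffer_list;	/* node embedded by value */
};

/*
 * Same recipe as nvgpu_mapped_buf_from_buffer_list(): step back from the
 * embedded member's address by that member's offset within the container.
 */
static inline struct mapped_buf *
mapped_buf_from_buffer_list(struct list_node *node)
{
	return (struct mapped_buf *)
		((uintptr_t)node - offsetof(struct mapped_buf, buffer_list));
}

int main(void)
{
	struct mapped_buf buf = { .addr = 0x4000 };

	/* A list walk hands out only the embedded node... */
	struct list_node *node = &buf.buffer_list;

	/* ...and the helper recovers the owning mapped_buf from it. */
	printf("addr = 0x%llx\n",
	       (unsigned long long)mapped_buf_from_buffer_list(node)->addr);
	return 0;
}

This is the standard idiom behind intrusive lists and rbtrees: the container embeds the node by value, so a traversal yields node pointers, and the offset arithmetic maps each one back to its owner without extra allocations or back-pointers.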