diff options
author | Alex Waterman <alexw@nvidia.com> | 2017-04-24 18:26:00 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-05-19 18:34:06 -0400 |
commit | 014ace5a85f274de7debb4c6168d69c803445e19 (patch) | |
tree | 4028be3294b95e38659f1ebba4a14457748e59f1 /drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |
parent | d37e8f7dcf190f31f9c0c12583db2bb0c0d313c0 (diff) |
gpu: nvgpu: Split VM implementation out
This patch begins splitting out the VM implementation from mm_gk20a.c and
moves it to common/linux/vm.c and common/mm/vm.c. This split is necessary
because the VM code has two portions: first, an interface for the OS
specific code to use (i.e. userspace mappings), and second, a set of APIs
for the driver to use (init, cleanup, etc) which are not OS specific.
This is only the beginning of the split - there are still many things
that need to be carefully moved around.
JIRA NVGPU-12
JIRA NVGPU-30
Change-Id: I3b57cba245d7daf9e4326a143b9c6217e0f28c96
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477743
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 52 |
1 file changed, 1 insertion, 51 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h index 331843cc..357962c7 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |||
@@ -140,52 +140,9 @@ struct priv_cmd_entry { | |||
140 | u32 size; /* in words */ | 140 | u32 size; /* in words */ |
141 | }; | 141 | }; |
142 | 142 | ||
143 | struct mapped_buffer_node { | ||
144 | struct vm_gk20a *vm; | ||
145 | struct nvgpu_rbtree_node node; | ||
146 | struct nvgpu_list_node va_buffers_list; | ||
147 | struct vm_reserved_va_node *va_node; | ||
148 | u64 addr; | ||
149 | u64 size; | ||
150 | struct dma_buf *dmabuf; | ||
151 | struct sg_table *sgt; | ||
152 | struct kref ref; | ||
153 | u32 user_mapped; | ||
154 | bool own_mem_ref; | ||
155 | u32 pgsz_idx; | ||
156 | u32 ctag_offset; | ||
157 | u32 ctag_lines; | ||
158 | u32 ctag_allocated_lines; | ||
159 | |||
160 | /* For comptag mapping, these are the mapping window parameters */ | ||
161 | bool ctags_mappable; | ||
162 | u64 ctag_map_win_addr; /* non-zero if mapped */ | ||
163 | u64 ctag_map_win_size; /* non-zero if ctags_mappable */ | ||
164 | u32 ctag_map_win_ctagline; /* ctagline at win start, set if | ||
165 | * ctags_mappable */ | ||
166 | |||
167 | u32 flags; | ||
168 | u32 kind; | ||
169 | bool va_allocated; | ||
170 | }; | ||
171 | |||
172 | static inline struct mapped_buffer_node * | ||
173 | mapped_buffer_node_from_va_buffers_list(struct nvgpu_list_node *node) | ||
174 | { | ||
175 | return (struct mapped_buffer_node *) | ||
176 | ((uintptr_t)node - offsetof(struct mapped_buffer_node, va_buffers_list)); | ||
177 | }; | ||
178 | |||
179 | static inline struct mapped_buffer_node * | ||
180 | mapped_buffer_from_rbtree_node(struct nvgpu_rbtree_node *node) | ||
181 | { | ||
182 | return (struct mapped_buffer_node *) | ||
183 | ((uintptr_t)node - offsetof(struct mapped_buffer_node, node)); | ||
184 | }; | ||
185 | |||
186 | struct vm_reserved_va_node { | 143 | struct vm_reserved_va_node { |
187 | struct nvgpu_list_node reserved_va_list; | 144 | struct nvgpu_list_node reserved_va_list; |
188 | struct nvgpu_list_node va_buffers_list; | 145 | struct nvgpu_list_node buffer_list_head; |
189 | u32 pgsz_idx; | 146 | u32 pgsz_idx; |
190 | u64 vaddr_start; | 147 | u64 vaddr_start; |
191 | u64 size; | 148 | u64 size; |
@@ -431,11 +388,6 @@ static inline phys_addr_t gk20a_mem_phys(struct nvgpu_mem *mem) | |||
431 | return 0; | 388 | return 0; |
432 | } | 389 | } |
433 | 390 | ||
434 | u32 __nvgpu_aperture_mask(struct gk20a *g, enum nvgpu_aperture aperture, | ||
435 | u32 sysmem_mask, u32 vidmem_mask); | ||
436 | u32 nvgpu_aperture_mask(struct gk20a *g, struct nvgpu_mem *mem, | ||
437 | u32 sysmem_mask, u32 vidmem_mask); | ||
438 | |||
439 | void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry, | 391 | void gk20a_pde_wr32(struct gk20a *g, struct gk20a_mm_entry *entry, |
440 | size_t w, size_t data); | 392 | size_t w, size_t data); |
441 | u64 gk20a_pde_addr(struct gk20a *g, struct gk20a_mm_entry *entry); | 393 | u64 gk20a_pde_addr(struct gk20a *g, struct gk20a_mm_entry *entry); |
@@ -532,8 +484,6 @@ const struct gk20a_mmu_level *gk20a_mm_get_mmu_levels(struct gk20a *g, | |||
532 | void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *mem, | 484 | void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *mem, |
533 | struct vm_gk20a *vm); | 485 | struct vm_gk20a *vm); |
534 | 486 | ||
535 | void gk20a_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block); | ||
536 | |||
537 | int gk20a_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size); | 487 | int gk20a_big_pages_possible(struct vm_gk20a *vm, u64 base, u64 size); |
538 | 488 | ||
539 | extern const struct gk20a_mmu_level gk20a_mm_levels_64k[]; | 489 | extern const struct gk20a_mmu_level gk20a_mm_levels_64k[]; |