author	Alex Waterman <alexw@nvidia.com>	2017-04-25 18:56:12 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-05-19 18:34:12 -0400
commit	29cc82844e03b6f9f0e6801169b6fa0e72d56628 (patch)
tree	f616b6c651ce80765ee344aa33ca204c555e67f2 /drivers/gpu/nvgpu/gk20a/mm_gk20a.h
parent	014ace5a85f274de7debb4c6168d69c803445e19 (diff)
gpu: nvgpu: Split vm_area management into vm code
The vm_reserved_va_node struct is essentially a special VM area that
can be used for sparse mappings and fixed mappings. The name of this
struct is somewhat confusing, since "node" is typically used for list
items. Though this struct is part of a list, it doesn't really make
sense to call it a list item because it is much more than that. The
struct has therefore been renamed to nvgpu_vm_area to capture its
actual use more accurately.

This also moves all of the vm_area management code into a new file
devoted solely to vm_area management.

Also add a brief overview of the VM architecture. This should help
other people follow the hierarchy of ownership and lifetimes in the
rather complex MM code.

JIRA NVGPU-12
JIRA NVGPU-30

Change-Id: If85e1cf868031d0dc265e7bed50b58a2aed2602e
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1477744
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
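For orientation, the renamed struct presumably carries the same state as
the vm_reserved_va_node removed in the hunk below. The sketch here takes
its fields from that removed definition; the member names addr and
vm_area_list are assumptions, since this patch does not show the new
nvgpu_vm_area definition itself:

	/* Sketch of the renamed struct, assuming it mirrors the removed
	 * vm_reserved_va_node. Member names addr and vm_area_list are
	 * guesses; the other fields come from the removed struct. */
	struct nvgpu_vm_area {
		u32 pgsz_idx;	/* page-size index used by the area */
		u64 addr;	/* GPU VA where the reservation starts */
		u64 size;	/* size of the reservation in bytes */
		bool sparse;	/* true if backed by sparse mappings */

		/* Buffers mapped into this area. */
		struct nvgpu_list_node buffer_list_head;
		/* Link in the owning VM's list of areas. */
		struct nvgpu_list_node vm_area_list;
	};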
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.h	| 21 +--------------------
1 file changed, 1 insertion(+), 20 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 357962c7..6ddf842a 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -140,22 +140,6 @@ struct priv_cmd_entry {
 	u32 size;	/* in words */
 };
 
-struct vm_reserved_va_node {
-	struct nvgpu_list_node reserved_va_list;
-	struct nvgpu_list_node buffer_list_head;
-	u32 pgsz_idx;
-	u64 vaddr_start;
-	u64 size;
-	bool sparse;
-};
-
-static inline struct vm_reserved_va_node *
-vm_reserved_va_node_from_reserved_va_list(struct nvgpu_list_node *node)
-{
-	return (struct vm_reserved_va_node *)
-		((uintptr_t)node - offsetof(struct vm_reserved_va_node, reserved_va_list));
-};
-
 struct gk20a;
 struct channel_gk20a;
 
@@ -442,10 +426,6 @@ struct nvgpu_as_free_space_args;
 int gk20a_vm_alloc_share(struct gk20a_as_share *as_share, u32 big_page_size,
 			 u32 flags);
 int gk20a_vm_release_share(struct gk20a_as_share *as_share);
-int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
-			 struct nvgpu_as_alloc_space_args *args);
-int gk20a_vm_free_space(struct gk20a_as_share *as_share,
-			 struct nvgpu_as_free_space_args *args);
 int gk20a_vm_bind_channel(struct gk20a_as_share *as_share,
 			  struct channel_gk20a *ch);
 int __gk20a_vm_bind_channel(struct vm_gk20a *vm, struct channel_gk20a *ch);
@@ -491,5 +471,6 @@ extern const struct gk20a_mmu_level gk20a_mm_levels_128k[];
 
 int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd,
 			     u64 *buffer_id, u64 *buffer_len);
+void gk20a_vm_unmap_locked_kref(struct kref *ref);
 
 #endif /* MM_GK20A_H */
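The deleted vm_reserved_va_node_from_reserved_va_list() helper above is
the usual intrusive-list idiom for recovering a containing struct from a
pointer to a list node embedded inside it (what the Linux kernel wraps
as container_of()). A minimal self-contained sketch of the same idiom,
with hypothetical names:

	#include <stddef.h>	/* offsetof */
	#include <stdint.h>	/* uintptr_t */

	/* A generic embedded list node, as in nvgpu_list_node. */
	struct list_node {
		struct list_node *prev, *next;
	};

	/* Hypothetical container with an embedded list node. */
	struct area {
		unsigned long start;
		struct list_node link;	/* lives inside the parent's list */
	};

	/* Recover the container: subtract the offset of the embedded
	 * member from the member's own address. */
	static inline struct area *area_from_link(struct list_node *node)
	{
		return (struct area *)
			((uintptr_t)node - offsetof(struct area, link));
	}

Because offsetof() needs the complete struct definition, an accessor
like this has to move together with the struct it recovers, which is why
the helper leaves this header along with vm_reserved_va_node.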