author | Terje Bergstrom <tbergstrom@nvidia.com> | 2015-02-09 23:54:01 -0500
---|---|---
committer | Dan Willemsen <dwillemsen@nvidia.com> | 2015-04-04 21:07:35 -0400
commit | f9fd5bbabe0d188a06d25bacdb18b91ef65a147d (patch) |
tree | ecf651164e5fbdbba48eec53291f2cef9ac715e7 /drivers/gpu/nvgpu/gk20a/mm_gk20a.h |
parent | 9bf82585aa24b6052855c119855abef92671d502 (diff) |
gpu: nvgpu: Unify PDE & PTE structs
Introduce a new struct gk20a_mm_entry. Allocate and store PDE and PTE
arrays using the same structure, and pass a pointer to this struct
between functions in the memory code wherever possible.
Change-Id: Ia4a2a6abdac9ab7ba522dafbf73fc3a3d5355c5f
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-on: http://git-master/r/696414
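The unification is easiest to see as a data-structure change: a page directory and a page table become the same recursive type, with the directory owning an array of child entries. Below is a minimal sketch of how the new type nests; `pde_index()` and the exact field roles are assumptions read off the header diff, not code from this patch.

```c
/*
 * Sketch only, not from the patch: the pdb (page directory base) in
 * vm_gk20a is a gk20a_mm_entry whose "entries" array holds one child
 * per PDE; each child is the backing store for a PTE array, described
 * by cpu_va/sgt/pages/size.  pde_index() is a hypothetical helper.
 */
static struct gk20a_mm_entry *
lookup_pte_entry(struct vm_gk20a *vm, u64 gpu_va)
{
	u32 pde = pde_index(vm, gpu_va);        /* hypothetical helper */
	struct gk20a_mm_entry *pdb = &vm->pdb;  /* was vm->pdes */

	if (!pdb->entries)
		return NULL;    /* directory not yet populated */

	return &pdb->entries[pde];
}
```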
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 36
1 file changed, 10 insertions(+), 26 deletions(-)
```diff
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 663bd5d3..40e9488d 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -195,15 +195,6 @@ struct gk20a_buffer_state {
 	struct gk20a_fence *fence;
 };
 
-struct page_table_gk20a {
-	/* backing for */
-	/* Either a *page or a *mem_handle */
-	void *ref;
-	/* track mapping cnt on this page table */
-	struct sg_table *sgt;
-	size_t size;
-};
-
 enum gmmu_pgsz_gk20a {
 	gmmu_page_size_small = 0,
 	gmmu_page_size_big = 1,
@@ -215,16 +206,14 @@ struct gk20a_comptags {
 	u32 lines;
 };
 
-
-struct page_directory_gk20a {
+struct gk20a_mm_entry {
 	/* backing for */
-	u32 num_pdes;
-	void *kv;
-	/* Either a *page or a *mem_handle */
-	void *ref;
+	void *cpu_va;
 	struct sg_table *sgt;
+	struct page **pages;
 	size_t size;
-	struct page_table_gk20a *ptes[gmmu_nr_page_sizes];
+	int pgsz;
+	struct gk20a_mm_entry *entries;
 };
 
 struct priv_cmd_queue {
@@ -305,7 +294,7 @@ struct vm_gk20a {
 
 	struct mutex update_gmmu_lock;
 
-	struct page_directory_gk20a pdes;
+	struct gk20a_mm_entry pdb;
 
 	struct gk20a_allocator vma[gmmu_nr_page_sizes];
 	struct rb_root mapped_buffers;
@@ -557,9 +546,8 @@ int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev);
 int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct device *dev,
 		u64 offset, struct gk20a_buffer_state **state);
 
-int map_gmmu_pages(void *handle, struct sg_table *sgt,
-		void **va, size_t size);
-void unmap_gmmu_pages(void *handle, struct sg_table *sgt, void *va);
+int map_gmmu_pages(struct gk20a_mm_entry *entry);
+void unmap_gmmu_pages(struct gk20a_mm_entry *entry);
 void pde_range_from_vaddr_range(struct vm_gk20a *vm,
 		u64 addr_lo, u64 addr_hi,
 		u32 *pde_lo, u32 *pde_hi);
@@ -568,14 +556,10 @@ u32 pte_index_from_vaddr(struct vm_gk20a *vm,
 		u64 addr, enum gmmu_pgsz_gk20a pgsz_idx);
 int validate_gmmu_page_table_gk20a_locked(struct vm_gk20a *vm,
 		u32 i, enum gmmu_pgsz_gk20a gmmu_pgsz_idx);
-int zalloc_gmmu_page_table_gk20a(struct vm_gk20a *vm,
-		enum gmmu_pgsz_gk20a gmmu_pgsz_idx,
-		struct page_table_gk20a *pte);
 
-void free_gmmu_pages(struct vm_gk20a *vm, void *handle,
-		struct sg_table *sgt, u32 order,
-		size_t size);
 void update_gmmu_pde_locked(struct vm_gk20a *vm, u32 i);
+void free_gmmu_pages(struct vm_gk20a *vm,
+		struct gk20a_mm_entry *entry);
 
 u32 gk20a_mm_get_physical_addr_bits(struct gk20a *g);
 
```
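A practical consequence of the new `map_gmmu_pages()`/`unmap_gmmu_pages()` prototypes is that callers stop threading the `handle`/`sgt`/`va`/`size` tuple through every call. The sketch below is a hypothetical call site (the .c changes are outside this header-only diffstat), assuming `map_gmmu_pages()` fills `entry->cpu_va` the way the old `void **va` out-parameter did.

```c
/* Hypothetical caller; names are illustrative, not from the patch. */
static int write_pte_words(struct gk20a_mm_entry *entry)
{
	/*
	 * Old API:  err = map_gmmu_pages(pte->ref, pte->sgt,
	 *                                &pte_kv, pte->size);
	 * New API:  the entry struct carries all of that state.
	 */
	int err = map_gmmu_pages(entry); /* presumably sets entry->cpu_va */

	if (err)
		return err;

	/* ... write PTE words through entry->cpu_va ... */

	unmap_gmmu_pages(entry);
	return 0;
}
```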