diff options
author | Alex Waterman <alexw@nvidia.com> | 2016-12-20 16:55:48 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-01-09 15:33:16 -0500 |
commit | 6df3992b60959d32c7113cb77e131a2547174f3a (patch) | |
tree | efbdc9e6ccd2330d5c469ca0783ecb0137da8fc4 /drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |
parent | e229514bece5a109cdbfe263f6329efe987e5939 (diff) |
gpu: nvgpu: Move allocators to common/mm/
Move the GPU allocators to common/mm/ since the allocators are common
code across all GPUs. Also rename the allocator code to move away from
gk20a_ prefixed structs and functions.
This caused one issue with the nvgpu_alloc() and nvgpu_free() functions.
There was a function for allocating either with kmalloc() or vmalloc()
depending on the size of the allocation. Those have now been renamed to
nvgpu_kalloc() and nvgpu_kfree().
Bug 1799159
Change-Id: Iddda92c013612bcb209847084ec85b8953002fa5
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1274400
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 23 |
1 file changed, 12 insertions, 11 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h index d32e121a..f58b5df5 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |||
@@ -27,7 +27,8 @@ | |||
27 | #include <linux/version.h> | 27 | #include <linux/version.h> |
28 | #include <asm/dma-iommu.h> | 28 | #include <asm/dma-iommu.h> |
29 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
30 | #include "gk20a_allocator.h" | 30 | |
31 | #include <nvgpu/allocator.h> | ||
31 | 32 | ||
32 | #ifdef CONFIG_ARM64 | 33 | #ifdef CONFIG_ARM64 |
33 | #define outer_flush_range(a, b) | 34 | #define outer_flush_range(a, b) |
@@ -70,7 +71,7 @@ struct mem_desc { | |||
70 | u64 gpu_va; | 71 | u64 gpu_va; |
71 | bool fixed; /* vidmem only */ | 72 | bool fixed; /* vidmem only */ |
72 | bool user_mem; /* vidmem only */ | 73 | bool user_mem; /* vidmem only */ |
73 | struct gk20a_allocator *allocator; /* vidmem only */ | 74 | struct nvgpu_allocator *allocator; /* vidmem only */ |
74 | struct list_head clear_list_entry; /* vidmem only */ | 75 | struct list_head clear_list_entry; /* vidmem only */ |
75 | bool skip_wmb; | 76 | bool skip_wmb; |
76 | }; | 77 | }; |
@@ -295,10 +296,10 @@ struct vm_gk20a { | |||
295 | 296 | ||
296 | struct gk20a_mm_entry pdb; | 297 | struct gk20a_mm_entry pdb; |
297 | 298 | ||
298 | struct gk20a_allocator vma[gmmu_nr_page_sizes]; | 299 | struct nvgpu_allocator vma[gmmu_nr_page_sizes]; |
299 | 300 | ||
300 | /* If necessary, split fixed from non-fixed. */ | 301 | /* If necessary, split fixed from non-fixed. */ |
301 | struct gk20a_allocator fixed; | 302 | struct nvgpu_allocator fixed; |
302 | 303 | ||
303 | struct rb_root mapped_buffers; | 304 | struct rb_root mapped_buffers; |
304 | 305 | ||
@@ -421,8 +422,8 @@ struct mm_gk20a { | |||
421 | size_t bootstrap_size; | 422 | size_t bootstrap_size; |
422 | u64 bootstrap_base; | 423 | u64 bootstrap_base; |
423 | 424 | ||
424 | struct gk20a_allocator allocator; | 425 | struct nvgpu_allocator allocator; |
425 | struct gk20a_allocator bootstrap_allocator; | 426 | struct nvgpu_allocator bootstrap_allocator; |
426 | 427 | ||
427 | u32 ce_ctx_id; | 428 | u32 ce_ctx_id; |
428 | volatile bool cleared; | 429 | volatile bool cleared; |
@@ -470,13 +471,13 @@ static inline u64 __nv_gmmu_va_small_page_limit(void) | |||
470 | 471 | ||
471 | static inline int __nv_gmmu_va_is_big_page_region(struct vm_gk20a *vm, u64 addr) | 472 | static inline int __nv_gmmu_va_is_big_page_region(struct vm_gk20a *vm, u64 addr) |
472 | { | 473 | { |
473 | struct gk20a_allocator *a = &vm->vma[gmmu_page_size_big]; | 474 | struct nvgpu_allocator *a = &vm->vma[gmmu_page_size_big]; |
474 | 475 | ||
475 | if (!vm->big_pages) | 476 | if (!vm->big_pages) |
476 | return 0; | 477 | return 0; |
477 | 478 | ||
478 | return addr >= gk20a_alloc_base(a) && | 479 | return addr >= nvgpu_alloc_base(a) && |
479 | addr < gk20a_alloc_base(a) + gk20a_alloc_length(a); | 480 | addr < nvgpu_alloc_base(a) + nvgpu_alloc_length(a); |
480 | } | 481 | } |
481 | 482 | ||
482 | /* | 483 | /* |
@@ -825,7 +826,7 @@ void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block); | |||
825 | extern const struct gk20a_mmu_level gk20a_mm_levels_64k[]; | 826 | extern const struct gk20a_mmu_level gk20a_mm_levels_64k[]; |
826 | extern const struct gk20a_mmu_level gk20a_mm_levels_128k[]; | 827 | extern const struct gk20a_mmu_level gk20a_mm_levels_128k[]; |
827 | 828 | ||
828 | static inline void *nvgpu_alloc(size_t size, bool clear) | 829 | static inline void *nvgpu_kalloc(size_t size, bool clear) |
829 | { | 830 | { |
830 | void *p; | 831 | void *p; |
831 | 832 | ||
@@ -844,7 +845,7 @@ static inline void *nvgpu_alloc(size_t size, bool clear) | |||
844 | return p; | 845 | return p; |
845 | } | 846 | } |
846 | 847 | ||
847 | static inline void nvgpu_free(void *p) | 848 | static inline void nvgpu_kfree(void *p) |
848 | { | 849 | { |
849 | if (virt_addr_valid(p)) | 850 | if (virt_addr_valid(p)) |
850 | kfree(p); | 851 | kfree(p); |