path: root/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c  72
1 file changed, 36 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 9c9fad1b..cdd0e541 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -34,7 +34,7 @@
 #include <nvgpu/timers.h>
 #include <nvgpu/pramin.h>
 #include <nvgpu/list.h>
-#include <nvgpu/mem_desc.h>
+#include <nvgpu/nvgpu_mem.h>
 #include <nvgpu/allocator.h>
 #include <nvgpu/semaphore.h>
 #include <nvgpu/page_allocator.h>
@@ -169,7 +169,7 @@ struct gk20a_dmabuf_priv {
 
 struct gk20a_vidmem_buf {
 	struct gk20a *g;
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;
 	struct dma_buf *dmabuf;
 	void *dmabuf_priv;
 	void (*dmabuf_priv_delete)(void *);
@@ -457,7 +457,7 @@ static int gk20a_init_mm_reset_enable_hw(struct gk20a *g)
 	return 0;
 }
 
-void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block)
+void gk20a_remove_vm(struct vm_gk20a *vm, struct nvgpu_mem *inst_block)
 {
 	struct gk20a *g = vm->mm->g;
 
@@ -1866,7 +1866,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 		nvgpu_mutex_release(&g->mm.vidmem.first_clear_mutex);
 	}
 
-	buf->mem = nvgpu_kzalloc(g, sizeof(struct mem_desc));
+	buf->mem = nvgpu_kzalloc(g, sizeof(struct nvgpu_mem));
 	if (!buf->mem)
 		goto err_kfree;
 
@@ -1931,7 +1931,7 @@ int gk20a_vidbuf_access_memory(struct gk20a *g, struct dma_buf *dmabuf,
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 	struct gk20a_vidmem_buf *vidmem_buf;
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;
 	int err = 0;
 
 	if (gk20a_dmabuf_aperture(g, dmabuf) != APERTURE_VIDMEM)
@@ -2519,13 +2519,13 @@ u64 gk20a_gmmu_fixed_map(struct vm_gk20a *vm,
 			aperture);
 }
 
-int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct mem_desc *mem)
+int gk20a_gmmu_alloc(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_flags(g, 0, size, mem);
 }
 
 int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
-		struct mem_desc *mem)
+		struct nvgpu_mem *mem)
 {
 	if (g->mm.vidmem_is_vidmem) {
 		/*
@@ -2549,7 +2549,7 @@ int gk20a_gmmu_alloc_flags(struct gk20a *g, unsigned long flags, size_t size,
 	return gk20a_gmmu_alloc_flags_sys(g, flags, size, mem);
 }
 
-int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct mem_desc *mem)
+int gk20a_gmmu_alloc_sys(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_flags_sys(g, 0, size, mem);
 }
@@ -2574,7 +2574,7 @@ static void gk20a_dma_flags_to_attrs(struct dma_attrs *attrs,
 }
 
 int gk20a_gmmu_alloc_flags_sys(struct gk20a *g, unsigned long flags,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
 	int err;
@@ -2631,7 +2631,7 @@ fail_free:
 	return err;
 }
 
-static void gk20a_gmmu_free_sys(struct gk20a *g, struct mem_desc *mem)
+static void gk20a_gmmu_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct device *d = dev_from_gk20a(g);
 
@@ -2666,7 +2666,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct mem_desc *mem)
 }
 
 #if defined(CONFIG_GK20A_VIDMEM)
-static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
+static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	struct gk20a_fence *gk20a_fence_out = NULL;
 	struct gk20a_fence *gk20a_last_fence = NULL;
@@ -2728,14 +2728,14 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
 }
 #endif
 
-int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct mem_desc *mem)
+int gk20a_gmmu_alloc_vid(struct gk20a *g, size_t size, struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_flags_vid(g,
 			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
 int gk20a_gmmu_alloc_flags_vid(struct gk20a *g, unsigned long flags,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_flags_vid_at(g, flags, size, mem, 0);
 }
@@ -2756,7 +2756,7 @@ static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 #endif
 
 int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
-		size_t size, struct mem_desc *mem, dma_addr_t at)
+		size_t size, struct nvgpu_mem *mem, dma_addr_t at)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 	u64 addr;
@@ -2831,7 +2831,7 @@ fail_physfree:
 #endif
 }
 
-static void gk20a_gmmu_free_vid(struct gk20a *g, struct mem_desc *mem)
+static void gk20a_gmmu_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
 	bool was_empty;
@@ -2863,7 +2863,7 @@ static void gk20a_gmmu_free_vid(struct gk20a *g, struct mem_desc *mem)
 #endif
 }
 
-void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem)
+void gk20a_gmmu_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
 	switch (mem->aperture) {
 	case APERTURE_SYSMEM:
@@ -2879,7 +2879,7 @@ void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem)
  * If mem is in VIDMEM, return base address in vidmem
  * else return IOVA address for SYSMEM
  */
-u64 gk20a_mem_get_base_addr(struct gk20a *g, struct mem_desc *mem,
+u64 gk20a_mem_get_base_addr(struct gk20a *g, struct nvgpu_mem *mem,
 		u32 flags)
 {
 	struct nvgpu_page_alloc *alloc;
@@ -2900,14 +2900,14 @@ u64 gk20a_mem_get_base_addr(struct gk20a *g, struct mem_desc *mem,
 }
 
 #if defined(CONFIG_GK20A_VIDMEM)
-static struct mem_desc *get_pending_mem_desc(struct mm_gk20a *mm)
+static struct nvgpu_mem *get_pending_mem_desc(struct mm_gk20a *mm)
 {
-	struct mem_desc *mem = NULL;
+	struct nvgpu_mem *mem = NULL;
 
 	nvgpu_mutex_acquire(&mm->vidmem.clear_list_mutex);
 	if (!nvgpu_list_empty(&mm->vidmem.clear_list_head)) {
 		mem = nvgpu_list_first_entry(&mm->vidmem.clear_list_head,
-				mem_desc, clear_list_entry);
+				nvgpu_mem, clear_list_entry);
 		nvgpu_list_del(&mem->clear_list_entry);
 	}
 	nvgpu_mutex_release(&mm->vidmem.clear_list_mutex);
@@ -2920,7 +2920,7 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
 	struct mm_gk20a *mm = container_of(work, struct mm_gk20a,
 					vidmem.clear_mem_worker);
 	struct gk20a *g = mm->g;
-	struct mem_desc *mem;
+	struct nvgpu_mem *mem;
 
 	while ((mem = get_pending_mem_desc(mm)) != NULL) {
 		gk20a_gmmu_clear_vidmem_mem(g, mem);
@@ -2939,13 +2939,13 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
 #endif
 
 int gk20a_gmmu_alloc_map(struct vm_gk20a *vm, size_t size,
-		struct mem_desc *mem)
+		struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_map_flags(vm, 0, size, mem);
 }
 
 int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	if (vm->mm->vidmem_is_vidmem) {
 		/*
@@ -2970,13 +2970,13 @@ int gk20a_gmmu_alloc_map_flags(struct vm_gk20a *vm, unsigned long flags,
 }
 
 int gk20a_gmmu_alloc_map_sys(struct vm_gk20a *vm, size_t size,
-		struct mem_desc *mem)
+		struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_map_flags_sys(vm, 0, size, mem);
 }
 
 int gk20a_gmmu_alloc_map_flags_sys(struct vm_gk20a *vm, unsigned long flags,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	int err = gk20a_gmmu_alloc_flags_sys(vm->mm->g, flags, size, mem);
 
@@ -2999,14 +2999,14 @@ fail_free:
 }
 
 int gk20a_gmmu_alloc_map_vid(struct vm_gk20a *vm, size_t size,
-		struct mem_desc *mem)
+		struct nvgpu_mem *mem)
 {
 	return gk20a_gmmu_alloc_map_flags_vid(vm,
 			NVGPU_DMA_NO_KERNEL_MAPPING, size, mem);
 }
 
 int gk20a_gmmu_alloc_map_flags_vid(struct vm_gk20a *vm, unsigned long flags,
-		size_t size, struct mem_desc *mem)
+		size_t size, struct nvgpu_mem *mem)
 {
 	int err = gk20a_gmmu_alloc_flags_vid(vm->mm->g, flags, size, mem);
 
@@ -3028,7 +3028,7 @@ fail_free:
 	return err;
 }
 
-void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct mem_desc *mem)
+void gk20a_gmmu_unmap_free(struct vm_gk20a *vm, struct nvgpu_mem *mem)
 {
 	if (mem->gpu_va)
 		gk20a_gmmu_unmap(vm, mem->gpu_va, mem->size, gk20a_mem_flag_none);
@@ -4583,7 +4583,7 @@ void gk20a_deinit_vm(struct vm_gk20a *vm)
 	gk20a_vm_free_entries(vm, &vm->pdb, 0);
 }
 
-int gk20a_alloc_inst_block(struct gk20a *g, struct mem_desc *inst_block)
+int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	struct device *dev = dev_from_gk20a(g);
 	int err;
@@ -4600,13 +4600,13 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct mem_desc *inst_block)
 	return 0;
 }
 
-void gk20a_free_inst_block(struct gk20a *g, struct mem_desc *inst_block)
+void gk20a_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	if (inst_block->size)
 		gk20a_gmmu_free(g, inst_block);
 }
 
-u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct mem_desc *inst_block)
+u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	u64 addr;
 	if (g->mm.has_physical_mode)
@@ -4622,7 +4622,7 @@ static int gk20a_init_bar1_vm(struct mm_gk20a *mm)
 	int err;
 	struct vm_gk20a *vm = &mm->bar1.vm;
 	struct gk20a *g = gk20a_from_mm(mm);
-	struct mem_desc *inst_block = &mm->bar1.inst_block;
+	struct nvgpu_mem *inst_block = &mm->bar1.inst_block;
 	u32 big_page_size = gk20a_get_platform(g->dev)->default_big_page_size;
 
 	mm->bar1.aperture_size = bar1_aperture_size_mb_gk20a() << 20;
@@ -4653,7 +4653,7 @@ static int gk20a_init_system_vm(struct mm_gk20a *mm)
 	int err;
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	struct gk20a *g = gk20a_from_mm(mm);
-	struct mem_desc *inst_block = &mm->pmu.inst_block;
+	struct nvgpu_mem *inst_block = &mm->pmu.inst_block;
 	u32 big_page_size = gk20a_get_platform(g->dev)->default_big_page_size;
 	u32 low_hole, aperture_size;
 
@@ -4691,7 +4691,7 @@ static int gk20a_init_hwpm(struct mm_gk20a *mm)
 	int err;
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	struct gk20a *g = gk20a_from_mm(mm);
-	struct mem_desc *inst_block = &mm->hwpm.inst_block;
+	struct nvgpu_mem *inst_block = &mm->hwpm.inst_block;
 
 	err = gk20a_alloc_inst_block(g, inst_block);
 	if (err)
@@ -4727,7 +4727,7 @@ static int gk20a_init_ce_vm(struct mm_gk20a *mm)
 				false, false, "ce");
 }
 
-void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
+void gk20a_mm_init_pdb(struct gk20a *g, struct nvgpu_mem *inst_block,
 		struct vm_gk20a *vm)
 {
 	u64 pdb_addr = gk20a_mem_get_base_addr(g, &vm->pdb.mem, 0);
@@ -4747,7 +4747,7 @@ void gk20a_mm_init_pdb(struct gk20a *g, struct mem_desc *inst_block,
 		ram_in_page_dir_base_hi_f(pdb_addr_hi));
 }
 
-void gk20a_init_inst_block(struct mem_desc *inst_block, struct vm_gk20a *vm,
+void gk20a_init_inst_block(struct nvgpu_mem *inst_block, struct vm_gk20a *vm,
 		u32 big_page_size)
 {
 	struct gk20a *g = gk20a_from_vm(vm);
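
For context, a minimal caller-side sketch of how the renamed type flows through the helpers whose signatures are updated above. This is illustrative only and not part of the change: the example function name is hypothetical, SZ_4K is assumed from <linux/sizes.h>, and error handling is abbreviated.

/*
 * Hypothetical usage sketch: allocate and GMMU-map a small sysmem
 * buffer described by struct nvgpu_mem (formerly struct mem_desc),
 * then unmap and free it with the helpers touched by this diff.
 */
#include <linux/sizes.h>	/* SZ_4K */
#include <nvgpu/nvgpu_mem.h>	/* was <nvgpu/mem_desc.h> */

static int example_alloc_map_free(struct vm_gk20a *vm)
{
	struct nvgpu_mem mem = {};	/* was: struct mem_desc */
	int err;

	/* Allocate 4K of system memory and map it into the VM. */
	err = gk20a_gmmu_alloc_map_sys(vm, SZ_4K, &mem);
	if (err)
		return err;

	/* ... use mem.gpu_va for GPU access ... */

	/* Unmap from the VM and release the backing memory. */
	gk20a_gmmu_unmap_free(vm, &mem);
	return 0;
}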