Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c  | 114
1 file changed, 57 insertions(+), 57 deletions(-)
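In short, the diff below renames the nvgpu allocator interface from the gk20a_ prefix to the nvgpu_ prefix (gk20a_allocator/gk20a_page_alloc become nvgpu_allocator/nvgpu_page_alloc, and gk20a_alloc/gk20a_alloc_fixed/gk20a_free become nvgpu_alloc/nvgpu_alloc_fixed/nvgpu_free), while the old kmalloc-style wrappers nvgpu_alloc/nvgpu_free are renamed to nvgpu_kalloc/nvgpu_kfree so the names no longer collide. A minimal sketch of the mapping as seen by callers in this file (the vm, size, at, and buf variables are placeholders, not taken from the diff):

	/* Before: gk20a_-prefixed allocator API */
	struct gk20a_allocator *old_vma = &vm->vma[gmmu_page_size_small];
	u64 old_va = gk20a_alloc(old_vma, size);	/* or gk20a_alloc_fixed(old_vma, at, size) */
	gk20a_free(old_vma, old_va);

	/* After: nvgpu_-prefixed allocator API */
	struct nvgpu_allocator *new_vma = &vm->vma[gmmu_page_size_small];
	u64 new_va = nvgpu_alloc(new_vma, size);	/* or nvgpu_alloc_fixed(new_vma, at, size) */
	nvgpu_free(new_vma, new_va);

	/* Kernel-memory wrappers renamed to avoid the collision */
	void *buf = nvgpu_kalloc(size, true);		/* was nvgpu_alloc(size, true) */
	nvgpu_kfree(buf);				/* was nvgpu_free(buf) */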
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 2e338fef..d594a5a4 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -31,9 +31,9 @@
 #include <uapi/linux/nvgpu.h>
 #include <trace/events/gk20a.h>
 
-#include <gk20a/page_allocator_priv.h>
-
 #include <nvgpu/timers.h>
+#include <nvgpu/allocator.h>
+#include <nvgpu/page_allocator.h>
 
 #include "gk20a.h"
 #include "mm_gk20a.h"
@@ -74,7 +74,7 @@ is_vidmem_page_alloc(u64 addr)
 	return !!(addr & 1ULL);
 }
 
-static inline struct gk20a_page_alloc *
+static inline struct nvgpu_page_alloc *
 get_vidmem_page_alloc(struct scatterlist *sgl)
 {
 	u64 addr;
@@ -86,7 +86,7 @@ get_vidmem_page_alloc(struct scatterlist *sgl)
 	else
 		WARN_ON(1);
 
-	return (struct gk20a_page_alloc *)(uintptr_t)addr;
+	return (struct nvgpu_page_alloc *)(uintptr_t)addr;
 }
 
 int gk20a_mem_begin(struct gk20a *g, struct mem_desc *mem)
@@ -176,7 +176,7 @@ typedef void (*pramin_access_batch_fn)(struct gk20a *g, u32 start, u32 words,
 static inline void pramin_access_batched(struct gk20a *g, struct mem_desc *mem,
 		u32 offset, u32 size, pramin_access_batch_fn loop, u32 **arg)
 {
-	struct gk20a_page_alloc *alloc = NULL;
+	struct nvgpu_page_alloc *alloc = NULL;
 	struct page_alloc_chunk *chunk = NULL;
 	u32 byteoff, start_reg, until_end, n;
 
@@ -797,8 +797,8 @@ void gk20a_remove_vm(struct vm_gk20a *vm, struct mem_desc *inst_block)
 static void gk20a_vidmem_destroy(struct gk20a *g)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
-	if (gk20a_alloc_initialized(&g->mm.vidmem.allocator))
-		gk20a_alloc_destroy(&g->mm.vidmem.allocator);
+	if (nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
+		nvgpu_alloc_destroy(&g->mm.vidmem.allocator);
 #endif
 }
 
@@ -928,8 +928,8 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
 	u64 default_page_size = SZ_64K;
 	int err;
 
-	static struct gk20a_alloc_carveout wpr_co =
-		GK20A_CARVEOUT("wpr-region", 0, SZ_16M);
+	static struct nvgpu_alloc_carveout wpr_co =
+		NVGPU_CARVEOUT("wpr-region", 0, SZ_16M);
 
 	if (!size)
 		return 0;
@@ -944,12 +944,12 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
 	 * initialization requires vidmem but we want to use the CE to zero
 	 * out vidmem before allocating it...
 	 */
-	err = gk20a_page_allocator_init(g, &g->mm.vidmem.bootstrap_allocator,
+	err = nvgpu_page_allocator_init(g, &g->mm.vidmem.bootstrap_allocator,
 					"vidmem-bootstrap",
 					bootstrap_base, bootstrap_size,
 					SZ_4K, 0);
 
-	err = gk20a_page_allocator_init(g, &g->mm.vidmem.allocator,
+	err = nvgpu_page_allocator_init(g, &g->mm.vidmem.allocator,
 					"vidmem",
 					base, size - base,
 					default_page_size,
@@ -961,7 +961,7 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
 	}
 
 	/* Reserve bootstrap region in vidmem allocator */
-	gk20a_alloc_reserve_carveout(&g->mm.vidmem.allocator, &wpr_co);
+	nvgpu_alloc_reserve_carveout(&g->mm.vidmem.allocator, &wpr_co);
 
 	mm->vidmem.base = base;
 	mm->vidmem.size = size - base;
@@ -1482,7 +1482,7 @@ int gk20a_vm_get_buffers(struct vm_gk20a *vm,
 
 	mutex_lock(&vm->update_gmmu_lock);
 
-	buffer_list = nvgpu_alloc(sizeof(*buffer_list) *
+	buffer_list = nvgpu_kalloc(sizeof(*buffer_list) *
 			      vm->num_user_mapped_buffers, true);
 	if (!buffer_list) {
 		mutex_unlock(&vm->update_gmmu_lock);
@@ -1567,7 +1567,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm,
 	gk20a_vm_mapping_batch_finish_locked(vm, &batch);
 	mutex_unlock(&vm->update_gmmu_lock);
 
-	nvgpu_free(mapped_buffers);
+	nvgpu_kfree(mapped_buffers);
 }
 
 static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
@@ -1623,7 +1623,7 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
 		enum gmmu_pgsz_gk20a gmmu_pgsz_idx)
 
 {
-	struct gk20a_allocator *vma = &vm->vma[gmmu_pgsz_idx];
+	struct nvgpu_allocator *vma = &vm->vma[gmmu_pgsz_idx];
 	u64 offset;
 	u64 gmmu_page_size = vm->gmmu_page_sizes[gmmu_pgsz_idx];
 
@@ -1645,7 +1645,7 @@ u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
 	gk20a_dbg_info("size=0x%llx @ pgsz=%dKB", size,
 			vm->gmmu_page_sizes[gmmu_pgsz_idx]>>10);
 
-	offset = gk20a_alloc(vma, size);
+	offset = nvgpu_alloc(vma, size);
 	if (!offset) {
 		gk20a_err(dev_from_vm(vm),
 			"%s oom: sz=0x%llx", vma->name, size);
@@ -1660,11 +1660,11 @@ int gk20a_vm_free_va(struct vm_gk20a *vm,
 		u64 offset, u64 size,
 		enum gmmu_pgsz_gk20a pgsz_idx)
 {
-	struct gk20a_allocator *vma = &vm->vma[pgsz_idx];
+	struct nvgpu_allocator *vma = &vm->vma[pgsz_idx];
 
 	gk20a_dbg_info("%s free addr=0x%llx, size=0x%llx",
 			vma->name, offset, size);
-	gk20a_free(vma, offset);
+	nvgpu_free(vma, offset);
 
 	return 0;
 }
@@ -2302,15 +2302,15 @@ err_kfree:
 int gk20a_vidmem_get_space(struct gk20a *g, u64 *space)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
-	struct gk20a_allocator *allocator = &g->mm.vidmem.allocator;
+	struct nvgpu_allocator *allocator = &g->mm.vidmem.allocator;
 
 	gk20a_dbg_fn("");
 
-	if (!gk20a_alloc_initialized(allocator))
+	if (!nvgpu_alloc_initialized(allocator))
 		return -ENOSYS;
 
 	mutex_lock(&g->mm.vidmem.clear_list_mutex);
-	*space = gk20a_alloc_space(allocator) +
+	*space = nvgpu_alloc_space(allocator) +
 		atomic64_read(&g->mm.vidmem.bytes_pending);
 	mutex_unlock(&g->mm.vidmem.clear_list_mutex);
 	return 0;
@@ -2359,7 +2359,7 @@ static u64 gk20a_mm_get_align(struct gk20a *g, struct scatterlist *sgl,
 	u64 buf_addr;
 
 	if (aperture == APERTURE_VIDMEM) {
-		struct gk20a_page_alloc *alloc = get_vidmem_page_alloc(sgl);
+		struct nvgpu_page_alloc *alloc = get_vidmem_page_alloc(sgl);
 		struct page_alloc_chunk *chunk = NULL;
 
 		list_for_each_entry(chunk, &alloc->alloc_chunks, list_entry) {
@@ -3068,7 +3068,7 @@ static int gk20a_gmmu_clear_vidmem_mem(struct gk20a *g, struct mem_desc *mem)
 {
 	struct gk20a_fence *gk20a_fence_out = NULL;
 	struct gk20a_fence *gk20a_last_fence = NULL;
-	struct gk20a_page_alloc *alloc = NULL;
+	struct nvgpu_page_alloc *alloc = NULL;
 	struct page_alloc_chunk *chunk = NULL;
 	int err = 0;
 
@@ -3134,15 +3134,15 @@ int gk20a_gmmu_alloc_attr_vid(struct gk20a *g, enum dma_attr attr,
 }
 
 #if defined(CONFIG_GK20A_VIDMEM)
-static u64 __gk20a_gmmu_alloc(struct gk20a_allocator *allocator, dma_addr_t at,
+static u64 __gk20a_gmmu_alloc(struct nvgpu_allocator *allocator, dma_addr_t at,
 				size_t size)
 {
 	u64 addr = 0;
 
 	if (at)
-		addr = gk20a_alloc_fixed(allocator, at, size);
+		addr = nvgpu_alloc_fixed(allocator, at, size);
 	else
-		addr = gk20a_alloc(allocator, size);
+		addr = nvgpu_alloc(allocator, size);
 
 	return addr;
 }
@@ -3154,14 +3154,14 @@ int gk20a_gmmu_alloc_attr_vid_at(struct gk20a *g, enum dma_attr attr,
 #if defined(CONFIG_GK20A_VIDMEM)
 	u64 addr;
 	int err;
-	struct gk20a_allocator *vidmem_alloc = g->mm.vidmem.cleared ?
+	struct nvgpu_allocator *vidmem_alloc = g->mm.vidmem.cleared ?
 		&g->mm.vidmem.allocator :
 		&g->mm.vidmem.bootstrap_allocator;
 	int before_pending;
 
 	gk20a_dbg_fn("");
 
-	if (!gk20a_alloc_initialized(&g->mm.vidmem.allocator))
+	if (!nvgpu_alloc_initialized(&g->mm.vidmem.allocator))
 		return -ENOSYS;
 
 	/* we don't support dma attributes here, except that kernel mappings
@@ -3214,7 +3214,7 @@ int gk20a_gmmu_alloc_attr_vid_at(struct gk20a *g, enum dma_attr attr,
 fail_kfree:
 	kfree(mem->sgt);
 fail_physfree:
-	gk20a_free(&g->mm.vidmem.allocator, addr);
+	nvgpu_free(&g->mm.vidmem.allocator, addr);
 	return err;
 #else
 	return -ENOSYS;
@@ -3241,7 +3241,7 @@ static void gk20a_gmmu_free_attr_vid(struct gk20a *g, enum dma_attr attr,
 		}
 	} else {
 		gk20a_memset(g, mem, 0, 0, mem->size);
-		gk20a_free(mem->allocator,
+		nvgpu_free(mem->allocator,
 			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
 		gk20a_free_sgtable(&mem->sgt);
 
@@ -3276,7 +3276,7 @@ void gk20a_gmmu_free(struct gk20a *g, struct mem_desc *mem)
 u64 gk20a_mem_get_base_addr(struct gk20a *g, struct mem_desc *mem,
 			    u32 flags)
 {
-	struct gk20a_page_alloc *alloc;
+	struct nvgpu_page_alloc *alloc;
 	u64 addr;
 
 	if (mem->aperture == APERTURE_VIDMEM) {
@@ -3317,7 +3317,7 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
 
 	while ((mem = get_pending_mem_desc(mm)) != NULL) {
 		gk20a_gmmu_clear_vidmem_mem(g, mem);
-		gk20a_free(mem->allocator,
+		nvgpu_free(mem->allocator,
 			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
 		gk20a_free_sgtable(&mem->sgt);
 
@@ -3905,7 +3905,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 	u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
 	int err;
 	struct scatterlist *sgl = NULL;
-	struct gk20a_page_alloc *alloc = NULL;
+	struct nvgpu_page_alloc *alloc = NULL;
 	struct page_alloc_chunk *chunk = NULL;
 	u64 length;
 
@@ -4251,12 +4251,12 @@ static int gk20a_init_sema_pool(struct vm_gk20a *vm)
 	 *
 	 * !!! TODO: cleanup.
 	 */
-	sema_sea->gpu_va = gk20a_alloc_fixed(&vm->vma[gmmu_page_size_kernel],
+	sema_sea->gpu_va = nvgpu_alloc_fixed(&vm->vma[gmmu_page_size_kernel],
 					     vm->va_limit -
 					     mm->channel.kernel_size,
 					     512 * PAGE_SIZE);
 	if (!sema_sea->gpu_va) {
-		gk20a_free(&vm->vma[gmmu_page_size_small], sema_sea->gpu_va);
+		nvgpu_free(&vm->vma[gmmu_page_size_small], sema_sea->gpu_va);
 		gk20a_vm_put(vm);
 		return -ENOMEM;
 	}
@@ -4264,7 +4264,7 @@ static int gk20a_init_sema_pool(struct vm_gk20a *vm)
 	err = gk20a_semaphore_pool_map(vm->sema_pool, vm);
 	if (err) {
 		gk20a_semaphore_pool_unmap(vm->sema_pool, vm);
-		gk20a_free(&vm->vma[gmmu_page_size_small],
+		nvgpu_free(&vm->vma[gmmu_page_size_small],
 			   vm->sema_pool->gpu_va);
 		gk20a_vm_put(vm);
 	}
@@ -4387,7 +4387,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 		snprintf(alloc_name, sizeof(alloc_name),
 			 "gk20a_%s-fixed", name);
 
-		err = __gk20a_buddy_allocator_init(g, &vm->fixed,
+		err = __nvgpu_buddy_allocator_init(g, &vm->fixed,
 						   vm, alloc_name,
 						   small_vma_start,
 						   g->separate_fixed_allocs,
@@ -4404,7 +4404,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 	if (small_vma_start < small_vma_limit) {
 		snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-%dKB", name,
 			 vm->gmmu_page_sizes[gmmu_page_size_small] >> 10);
-		err = __gk20a_buddy_allocator_init(
+		err = __nvgpu_buddy_allocator_init(
 			g,
 			&vm->vma[gmmu_page_size_small],
 			vm, alloc_name,
@@ -4420,7 +4420,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 	if (large_vma_start < large_vma_limit) {
 		snprintf(alloc_name, sizeof(alloc_name), "gk20a_%s-%dKB",
 			 name, vm->gmmu_page_sizes[gmmu_page_size_big] >> 10);
-		err = __gk20a_buddy_allocator_init(
+		err = __nvgpu_buddy_allocator_init(
 			g,
 			&vm->vma[gmmu_page_size_big],
 			vm, alloc_name,
@@ -4438,7 +4438,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 	/*
 	 * kernel reserved VMA is at the end of the aperture
 	 */
-	err = __gk20a_buddy_allocator_init(g, &vm->vma[gmmu_page_size_kernel],
+	err = __nvgpu_buddy_allocator_init(g, &vm->vma[gmmu_page_size_kernel],
 					   vm, alloc_name,
 					   kernel_vma_start,
 					   kernel_vma_limit - kernel_vma_start,
@@ -4469,10 +4469,10 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 
 clean_up_big_allocator:
 	if (large_vma_start < large_vma_limit)
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]);
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
 clean_up_small_allocator:
 	if (small_vma_start < small_vma_limit)
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
 clean_up_ptes:
 	free_gmmu_pages(vm, &vm->pdb);
 clean_up_pdes:
@@ -4547,7 +4547,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 {
 	int err = -ENOMEM;
 	int pgsz_idx = gmmu_page_size_small;
-	struct gk20a_allocator *vma;
+	struct nvgpu_allocator *vma;
 	struct vm_gk20a *vm = as_share->vm;
 	struct gk20a *g = vm->mm->g;
 	struct vm_reserved_va_node *va_node;
@@ -4579,13 +4579,13 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 
 	vma = &vm->vma[pgsz_idx];
 	if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET) {
-		if (gk20a_alloc_initialized(&vm->fixed))
+		if (nvgpu_alloc_initialized(&vm->fixed))
 			vma = &vm->fixed;
-		vaddr_start = gk20a_alloc_fixed(vma, args->o_a.offset,
+		vaddr_start = nvgpu_alloc_fixed(vma, args->o_a.offset,
 						(u64)args->pages *
 						(u64)args->page_size);
 	} else {
-		vaddr_start = gk20a_alloc(vma,
+		vaddr_start = nvgpu_alloc(vma,
 					  (u64)args->pages *
 					  (u64)args->page_size);
 	}
@@ -4621,7 +4621,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 					APERTURE_INVALID);
 		if (!map_offset) {
 			mutex_unlock(&vm->update_gmmu_lock);
-			gk20a_free(vma, vaddr_start);
+			nvgpu_free(vma, vaddr_start);
 			kfree(va_node);
 			goto clean_up;
 		}
@@ -4644,7 +4644,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 {
 	int err = -ENOMEM;
 	int pgsz_idx;
-	struct gk20a_allocator *vma;
+	struct nvgpu_allocator *vma;
 	struct vm_gk20a *vm = as_share->vm;
 	struct vm_reserved_va_node *va_node;
 	struct gk20a *g = gk20a_from_vm(vm);
@@ -4656,11 +4656,11 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 	pgsz_idx = __nv_gmmu_va_is_big_page_region(vm, args->offset) ?
 			gmmu_page_size_big : gmmu_page_size_small;
 
-	if (gk20a_alloc_initialized(&vm->fixed))
+	if (nvgpu_alloc_initialized(&vm->fixed))
 		vma = &vm->fixed;
 	else
 		vma = &vm->vma[pgsz_idx];
-	gk20a_free(vma, args->offset);
+	nvgpu_free(vma, args->offset);
 
 	mutex_lock(&vm->update_gmmu_lock);
 	va_node = addr_to_reservation(vm, args->offset);
@@ -4844,13 +4844,13 @@ int gk20a_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
 
 void gk20a_deinit_vm(struct vm_gk20a *vm)
 {
-	gk20a_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
-	if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_big]))
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_big]);
-	if (gk20a_alloc_initialized(&vm->vma[gmmu_page_size_small]))
-		gk20a_alloc_destroy(&vm->vma[gmmu_page_size_small]);
-	if (gk20a_alloc_initialized(&vm->fixed))
-		gk20a_alloc_destroy(&vm->fixed);
+	nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_kernel]);
+	if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_big]))
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_big]);
+	if (nvgpu_alloc_initialized(&vm->vma[gmmu_page_size_small]))
+		nvgpu_alloc_destroy(&vm->vma[gmmu_page_size_small]);
+	if (nvgpu_alloc_initialized(&vm->fixed))
+		nvgpu_alloc_destroy(&vm->fixed);
 
 	gk20a_vm_free_entries(vm, &vm->pdb, 0);
 }