author	Alex Waterman <alexw@nvidia.com>	2017-03-08 20:06:47 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-03-30 15:36:04 -0400
commit	7010bf88399ea81b2b35844f738baac19dc5a441 (patch)
tree	476a43b94402d702f20c4336dec98009d23a2afa	/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
parent	bc92e2fb972e039ee33c1f1477204a4d145a8b96 (diff)
gpu: nvgpu: Use new kmem API functions (gk20a mm)
Use the new kmem API functions in gk20a's mm code. Add a struct gk20a
pointer to the dmabuf priv struct so that the cleanup function has access
to the gk20a struct. Also add a gk20a pointer to some of the sg table
functions so that they can use the nvgpu kmem APIs.

Bug 1799159
Bug 1823380

Change-Id: I85a307c6bf862627c5b1af0e077283b48d78fa72
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1318321
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.c	97
1 file changed, 57 insertions(+), 40 deletions(-)
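
For reference, the change is a mechanical substitution of the raw kernel allocators for the nvgpu kmem wrappers, threading the owning struct gk20a through each call so allocations can be tracked per GPU. The sketch below is illustrative only and is not part of the patch; the example_* helpers are hypothetical, and the only assumed interfaces are the nvgpu_kzalloc(g, size) and nvgpu_kfree(g, ptr) signatures visible in the hunks that follow.

/*
 * Illustrative sketch only -- not part of the patch. The example_* helpers
 * are hypothetical; the nvgpu_kzalloc()/nvgpu_kfree() signatures are taken
 * from the hunks below.
 */
static int example_alloc_sgt(struct gk20a *g, struct mem_desc *mem)
{
	/* was: mem->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); */
	mem->sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
	if (!mem->sgt)
		return -ENOMEM;
	return 0;
}

static void example_free_sgt(struct gk20a *g, struct mem_desc *mem)
{
	/* was: kfree(mem->sgt); */
	nvgpu_kfree(g, mem->sgt);
	mem->sgt = NULL;
}

Call sites with no struct gk20a in scope first fetch one (priv->g, vm->mm->g, or get_gk20a(dev)), which is why several hunks below also add a gk20a pointer to the dmabuf priv struct and to the sg table helpers.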
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 102aae75..05535412 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -22,7 +22,6 @@
 #include <linux/nvhost.h>
 #include <linux/scatterlist.h>
 #include <linux/nvmap.h>
-#include <linux/vmalloc.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-attrs.h>
@@ -486,6 +485,8 @@ static struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf);
 struct gk20a_dmabuf_priv {
 	struct nvgpu_mutex lock;
 
+	struct gk20a *g;
+
 	struct gk20a_comptag_allocator *comptag_allocator;
 	struct gk20a_comptags comptags;
 
@@ -548,9 +549,13 @@ static void gk20a_mm_delete_priv(void *_priv)
 {
 	struct gk20a_buffer_state *s, *s_tmp;
 	struct gk20a_dmabuf_priv *priv = _priv;
+	struct gk20a *g;
+
 	if (!priv)
 		return;
 
+	g = priv->g;
+
 	if (priv->comptags.lines) {
 		BUG_ON(!priv->comptag_allocator);
 		gk20a_comptaglines_free(priv->comptag_allocator,
@@ -562,10 +567,10 @@ static void gk20a_mm_delete_priv(void *_priv)
 	list_for_each_entry_safe(s, s_tmp, &priv->states, list) {
 		gk20a_fence_put(s->fence);
 		list_del(&s->list);
-		kfree(s);
+		nvgpu_kfree(g, s);
 	}
 
-	kfree(priv);
+	nvgpu_kfree(g, priv);
 }
 
 struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf)
@@ -1142,6 +1147,7 @@ static int alloc_gmmu_phys_pages(struct vm_gk20a *vm, u32 order,
 	u32 len = num_pages * PAGE_SIZE;
 	int err;
 	struct page *pages;
+	struct gk20a *g = vm->mm->g;
 
 	gk20a_dbg_fn("");
 
@@ -1152,7 +1158,7 @@ static int alloc_gmmu_phys_pages(struct vm_gk20a *vm, u32 order,
 		gk20a_dbg(gpu_dbg_pte, "alloc_pages failed");
 		goto err_out;
 	}
-	entry->mem.sgt = kzalloc(sizeof(*entry->mem.sgt), GFP_KERNEL);
+	entry->mem.sgt = nvgpu_kzalloc(g, sizeof(*entry->mem.sgt));
 	if (!entry->mem.sgt) {
 		gk20a_dbg(gpu_dbg_pte, "cannot allocate sg table");
 		goto err_alloced;
@@ -1172,7 +1178,7 @@ static int alloc_gmmu_phys_pages(struct vm_gk20a *vm, u32 order,
 	return 0;
 
 err_sg_table:
-	kfree(entry->mem.sgt);
+	nvgpu_kfree(vm->mm->g, entry->mem.sgt);
err_alloced:
 	__free_pages(pages, order);
err_out:
@@ -1190,7 +1196,7 @@ static void free_gmmu_phys_pages(struct vm_gk20a *vm,
 	entry->mem.cpu_va = NULL;
 
 	sg_free_table(entry->mem.sgt);
-	kfree(entry->mem.sgt);
+	nvgpu_kfree(vm->mm->g, entry->mem.sgt);
 	entry->mem.sgt = NULL;
 	entry->mem.size = 0;
 	entry->mem.aperture = APERTURE_INVALID;
@@ -2113,7 +2119,7 @@ static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
 		buf->dmabuf_priv_delete(buf->dmabuf_priv);
 
 	gk20a_gmmu_free(buf->g, buf->mem);
-	kfree(buf);
+	nvgpu_kfree(buf->g, buf);
 }
 
 static void *gk20a_vidbuf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
@@ -2204,7 +2210,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 
 	gk20a_dbg_fn("");
 
-	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	buf = nvgpu_kzalloc(g, sizeof(*buf));
 	if (!buf)
 		return -ENOMEM;
 
@@ -2223,7 +2229,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 		nvgpu_mutex_release(&g->mm.vidmem.first_clear_mutex);
 	}
 
-	buf->mem = kzalloc(sizeof(struct mem_desc), GFP_KERNEL);
+	buf->mem = nvgpu_kzalloc(g, sizeof(struct mem_desc));
 	if (!buf->mem)
 		goto err_kfree;
 
@@ -2254,9 +2260,9 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
err_bfree:
 	gk20a_gmmu_free(g, buf->mem);
err_memfree:
-	kfree(buf->mem);
+	nvgpu_kfree(g, buf->mem);
err_kfree:
-	kfree(buf);
+	nvgpu_kfree(g, buf);
 	return err;
 #else
 	return -ENOSYS;
@@ -2556,7 +2562,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 
 	/* keep track of the buffer for unmapping */
 	/* TBD: check for multiple mapping of same buffer */
-	mapped_buffer = kzalloc(sizeof(*mapped_buffer), GFP_KERNEL);
+	mapped_buffer = nvgpu_kzalloc(g, sizeof(*mapped_buffer));
 	if (!mapped_buffer) {
 		gk20a_warn(d, "oom allocating tracking buffer");
 		goto clean_up;
@@ -2609,7 +2615,7 @@ clean_up:
 		if (user_mapped)
 			vm->num_user_mapped_buffers--;
 	}
-	kfree(mapped_buffer);
+	nvgpu_kfree(g, mapped_buffer);
 	if (va_allocated)
 		gk20a_vm_free_va(vm, map_offset, bfr.size, bfr.pgsz_idx);
 	if (!IS_ERR(bfr.sgt))
@@ -3007,7 +3013,7 @@ static void gk20a_gmmu_free_sys(struct gk20a *g, struct mem_desc *mem)
 	}
 
 	if (mem->sgt)
-		gk20a_free_sgtable(&mem->sgt);
+		gk20a_free_sgtable(g, &mem->sgt);
 
 	mem->size = 0;
 	mem->aperture = APERTURE_INVALID;
@@ -3144,7 +3150,7 @@ int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	else
 		mem->fixed = false;
 
-	mem->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	mem->sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
 	if (!mem->sgt) {
 		err = -ENOMEM;
 		goto fail_physfree;
@@ -3168,7 +3174,7 @@ int gk20a_gmmu_alloc_flags_vid_at(struct gk20a *g, unsigned long flags,
 	return 0;
 
fail_kfree:
-	kfree(mem->sgt);
+	nvgpu_kfree(g, mem->sgt);
fail_physfree:
 	nvgpu_free(&g->mm.vidmem.allocator, addr);
 	return err;
@@ -3201,7 +3207,7 @@ static void gk20a_gmmu_free_vid(struct gk20a *g, struct mem_desc *mem)
 		gk20a_memset(g, mem, 0, 0, mem->size);
 		nvgpu_free(mem->allocator,
 			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
-		gk20a_free_sgtable(&mem->sgt);
+		gk20a_free_sgtable(g, &mem->sgt);
 
 		mem->size = 0;
 		mem->aperture = APERTURE_INVALID;
@@ -3271,14 +3277,14 @@ static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
 		gk20a_gmmu_clear_vidmem_mem(g, mem);
 		nvgpu_free(mem->allocator,
 			   (u64)get_vidmem_page_alloc(mem->sgt->sgl));
-		gk20a_free_sgtable(&mem->sgt);
+		gk20a_free_sgtable(g, &mem->sgt);
 
 		WARN_ON(atomic64_sub_return(mem->size,
 					&g->mm.vidmem.bytes_pending) < 0);
 		mem->size = 0;
 		mem->aperture = APERTURE_INVALID;
 
-		kfree(mem);
+		nvgpu_kfree(g, mem);
 	}
 }
 #endif
@@ -3445,8 +3451,10 @@ int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
 			void *cpuva, u64 iova,
 			size_t size)
 {
+	struct gk20a *g = get_gk20a(d);
+
 	int err = 0;
-	*sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	*sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
 	if (!(*sgt)) {
 		dev_err(d, "failed to allocate memory\n");
 		err = -ENOMEM;
@@ -3464,7 +3472,7 @@ int gk20a_get_sgtable(struct device *d, struct sg_table **sgt,
 	return 0;
 fail:
 	if (*sgt) {
-		kfree(*sgt);
+		nvgpu_kfree(g, *sgt);
 		*sgt = NULL;
 	}
 	return err;
@@ -3475,7 +3483,9 @@ int gk20a_get_sgtable_from_pages(struct device *d, struct sg_table **sgt,
 			size_t size)
 {
 	int err = 0;
-	*sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	struct gk20a *g = get_gk20a(d);
+
+	*sgt = nvgpu_kzalloc(g, sizeof(struct sg_table));
 	if (!(*sgt)) {
 		dev_err(d, "failed to allocate memory\n");
 		err = -ENOMEM;
@@ -3492,16 +3502,16 @@ int gk20a_get_sgtable_from_pages(struct device *d, struct sg_table **sgt,
 	return 0;
fail:
 	if (*sgt) {
-		kfree(*sgt);
+		nvgpu_kfree(get_gk20a(d), *sgt);
 		*sgt = NULL;
 	}
 	return err;
 }
 
-void gk20a_free_sgtable(struct sg_table **sgt)
+void gk20a_free_sgtable(struct gk20a *g, struct sg_table **sgt)
 {
 	sg_free_table(*sgt);
-	kfree(*sgt);
+	nvgpu_kfree(g, *sgt);
 	*sgt = NULL;
 }
 
@@ -3773,7 +3783,8 @@ static int update_gmmu_level_locked(struct vm_gk20a *vm,
 			(l->hi_bit[pgsz_idx]
			 - l->lo_bit[pgsz_idx] + 1);
 		pte->entries =
-			vzalloc(sizeof(struct gk20a_mm_entry) *
+			nvgpu_vzalloc(g,
+				      sizeof(struct gk20a_mm_entry) *
 				num_entries);
 		if (!pte->entries)
 			return -ENOMEM;
@@ -4030,7 +4041,7 @@ void gk20a_vm_unmap_locked(struct mapped_buffer_node *mapped_buffer,
 	if (mapped_buffer->own_mem_ref)
 		dma_buf_put(mapped_buffer->dmabuf);
 
-	kfree(mapped_buffer);
+	nvgpu_kfree(g, mapped_buffer);
 
 	return;
 }
@@ -4064,7 +4075,7 @@ static void gk20a_vm_free_entries(struct vm_gk20a *vm,
 
 	if (parent->mem.size)
 		free_gmmu_pages(vm, parent);
-	vfree(parent->entries);
+	nvgpu_vfree(vm->mm->g, parent->entries);
 	parent->entries = NULL;
 }
 
@@ -4106,7 +4117,7 @@ static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm)
 	list_for_each_entry_safe(va_node, va_node_tmp, &vm->reserved_va_list,
 		reserved_va_list) {
 		list_del(&va_node->reserved_va_list);
-		kfree(va_node);
+		nvgpu_kfree(vm->mm->g, va_node);
 	}
 
 	gk20a_deinit_vm(vm);
@@ -4118,7 +4129,7 @@ void gk20a_vm_remove_support(struct vm_gk20a *vm)
 {
 	gk20a_vm_remove_support_nofree(vm);
 	/* vm is not used anymore. release it. */
-	kfree(vm);
+	nvgpu_kfree(vm->mm->g, vm);
 }
 
 static void gk20a_vm_remove_support_kref(struct kref *ref)
@@ -4309,8 +4320,9 @@ static int init_vm_page_tables(struct vm_gk20a *vm)
 	pde_range_from_vaddr_range(vm,
				   0, vm->va_limit-1,
				   &pde_lo, &pde_hi);
-	vm->pdb.entries = vzalloc(sizeof(struct gk20a_mm_entry) *
-				  (pde_hi + 1));
+	vm->pdb.entries = nvgpu_vzalloc(vm->mm->g,
+					sizeof(struct gk20a_mm_entry) *
+					(pde_hi + 1));
 	vm->pdb.num_entries = pde_hi + 1;
 
 	if (!vm->pdb.entries)
@@ -4561,7 +4573,7 @@ clean_up_allocators:
 		nvgpu_alloc_destroy(&vm->user_lp);
clean_up_page_tables:
 	/* Cleans up init_vm_page_tables() */
-	vfree(vm->pdb.entries);
+	nvgpu_vfree(g, vm->pdb.entries);
 	free_gmmu_pages(vm, &vm->pdb);
 	return err;
 }
@@ -4592,7 +4604,7 @@ int gk20a_vm_alloc_share(struct gk20a_as_share *as_share, u32 big_page_size,
 		return -EINVAL;
 	}
 
-	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+	vm = nvgpu_kzalloc(g, sizeof(*vm));
 	if (!vm)
 		return -ENOMEM;
 
@@ -4656,7 +4668,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 		goto clean_up;
 	}
 
-	va_node = kzalloc(sizeof(*va_node), GFP_KERNEL);
+	va_node = nvgpu_kzalloc(g, sizeof(*va_node));
 	if (!va_node) {
 		err = -ENOMEM;
 		goto clean_up;
@@ -4674,7 +4686,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 			(u64)args->page_size);
 
 	if (!vaddr_start) {
-		kfree(va_node);
+		nvgpu_kfree(g, va_node);
 		goto clean_up;
 	}
 
@@ -4705,7 +4717,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 		if (!map_offset) {
 			nvgpu_mutex_release(&vm->update_gmmu_lock);
 			nvgpu_free(vma, vaddr_start);
-			kfree(va_node);
+			nvgpu_kfree(g, va_node);
 			goto clean_up;
 		}
 
@@ -4768,7 +4780,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 				gk20a_mem_flag_none,
 				true,
 				NULL);
-		kfree(va_node);
+		nvgpu_kfree(g, va_node);
 	}
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 	err = 0;
@@ -4811,15 +4823,19 @@ int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev)
 	priv = dma_buf_get_drvdata(dmabuf, dev);
 	if (priv)
 		goto priv_exist_or_err;
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+
+	priv = nvgpu_kzalloc(g, sizeof(*priv));
 	if (!priv) {
 		priv = ERR_PTR(-ENOMEM);
 		goto priv_exist_or_err;
 	}
+
 	nvgpu_mutex_init(&priv->lock);
 	INIT_LIST_HEAD(&priv->states);
 	priv->buffer_id = ++priv_count;
+	priv->g = g;
 	dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv);
+
priv_exist_or_err:
 	nvgpu_mutex_release(&g->mm.priv_lock);
 	if (IS_ERR(priv))
@@ -4834,6 +4850,7 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct device *dev,
 	int err = 0;
 	struct gk20a_dmabuf_priv *priv;
 	struct gk20a_buffer_state *s;
+	struct gk20a *g = get_gk20a(dev);
 
 	if (WARN_ON(offset >= (u64)dmabuf->size))
 		return -EINVAL;
@@ -4853,7 +4870,7 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct device *dev,
 			goto out;
 
 	/* State not found, create state. */
-	s = kzalloc(sizeof(*s), GFP_KERNEL);
+	s = nvgpu_kzalloc(g, sizeof(*s));
 	if (!s) {
 		err = -ENOMEM;
 		goto out;