author    Alex Waterman <alexw@nvidia.com>    2017-03-08 19:58:25 -0500
committer mobile promotions <svcmobile_promotions@nvidia.com>    2017-03-28 12:39:07 -0400
commit    2e15a2d1accb8303c2363122c638e08ae7b70a50 (patch)
tree      fd967e64059e4b868f26de0aab56828984c52139 /drivers/gpu/nvgpu/vgpu/gr_vgpu.c
parent    8a15e02ca92b83aa5a216ea9cd42680373212ecd (diff)
gpu: nvgpu: Use new kmem API functions (vgpu/*)
Use the new kmem API functions in vgpu/*. Also reshuffle the order of
some allocs in the vgpu init code to allow usage of the nvgpu kmem
APIs.

Bug 1799159
Bug 1823380

Change-Id: I6c6dcff03b406a260dffbf89a59b368d31a4cb2c
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1318318
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
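For reference, the conversion this patch applies is mechanical: each raw slab call gains the per-GPU context pointer. The sketch below is illustrative only, assuming just the two wrappers visible in the diff (nvgpu_kzalloc() and nvgpu_kfree(), both taking a struct gk20a *); the example_* helper names are hypothetical and not part of the patch.

    #include <nvgpu/kmem.h>

    /*
     * Hypothetical helpers showing the kzalloc()/kfree() ->
     * nvgpu_kzalloc()/nvgpu_kfree() conversion pattern; not part of
     * this patch.
     */
    static struct sm_info *example_alloc_sm_table(struct gk20a *g, u32 count)
    {
            struct sm_info *table;

            /* Before: table = kzalloc(count * sizeof(*table), GFP_KERNEL); */
            table = nvgpu_kzalloc(g, count * sizeof(*table));
            if (!table)
                    return NULL;

            return table;
    }

    static void example_free_sm_table(struct gk20a *g, struct sm_info *table)
    {
            /* Before: kfree(table); the wrapper also needs the gk20a pointer. */
            nvgpu_kfree(g, table);
    }

Because every wrapper call needs a struct gk20a * (g, or gr->g where only the gr_gk20a is in scope), allocations that previously ran before that pointer was available had to move later in the init path, which is presumably the alloc reshuffling the message mentions.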
Diffstat (limited to 'drivers/gpu/nvgpu/vgpu/gr_vgpu.c')
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gr_vgpu.c | 35
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
index a98c9d38..c6a51719 100644
--- a/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/gr_vgpu.c
@@ -13,6 +13,8 @@
  * more details.
  */
 
+#include <nvgpu/kmem.h>
+
 #include "vgpu/vgpu.h"
 #include "gk20a/dbg_gpu_gk20a.h"
 
@@ -269,7 +271,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 	gr->ctx_vars.buffer_size = gr->ctx_vars.golden_image_size;
 	gr->ctx_vars.buffer_total_size = gr->ctx_vars.golden_image_size;
 
-	gr_ctx = kzalloc(sizeof(*gr_ctx), GFP_KERNEL);
+	gr_ctx = nvgpu_kzalloc(g, sizeof(*gr_ctx));
 	if (!gr_ctx)
 		return -ENOMEM;
 
@@ -279,7 +281,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 					gmmu_page_size_kernel);
 
 	if (!gr_ctx->mem.gpu_va) {
-		kfree(gr_ctx);
+		nvgpu_kfree(g, gr_ctx);
 		return -ENOMEM;
 	}
 
@@ -295,7 +297,7 @@ int vgpu_gr_alloc_gr_ctx(struct gk20a *g,
 		gk20a_err(dev_from_gk20a(g), "fail to alloc gr_ctx");
 		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va,
 				 gr_ctx->mem.size, gmmu_page_size_kernel);
-		kfree(gr_ctx);
+		nvgpu_kfree(g, gr_ctx);
 	} else {
 		gr_ctx->virt_ctx = p->gr_ctx_handle;
 		*__gr_ctx = gr_ctx;
@@ -322,7 +324,7 @@ void vgpu_gr_free_gr_ctx(struct gk20a *g, struct vm_gk20a *vm,
 
 		gk20a_vm_free_va(vm, gr_ctx->mem.gpu_va, gr_ctx->mem.size,
 				 gmmu_page_size_kernel);
-		kfree(gr_ctx);
+		nvgpu_kfree(g, gr_ctx);
 	}
 }
 
@@ -617,16 +619,17 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 
 	gr->max_tpc_count = gr->max_gpc_count * gr->max_tpc_per_gpc_count;
 
-	gr->gpc_tpc_count = kzalloc(gr->gpc_count * sizeof(u32), GFP_KERNEL);
+	gr->gpc_tpc_count = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
 	if (!gr->gpc_tpc_count)
 		goto cleanup;
 
-	gr->gpc_tpc_mask = kzalloc(gr->gpc_count * sizeof(u32), GFP_KERNEL);
+	gr->gpc_tpc_mask = nvgpu_kzalloc(g, gr->gpc_count * sizeof(u32));
 	if (!gr->gpc_tpc_mask)
 		goto cleanup;
 
-	gr->sm_to_cluster = kzalloc(gr->gpc_count * gr->max_tpc_per_gpc_count *
-				    sizeof(struct sm_info), GFP_KERNEL);
+	gr->sm_to_cluster = nvgpu_kzalloc(g, gr->gpc_count *
+					  gr->max_tpc_per_gpc_count *
+					  sizeof(struct sm_info));
 	if (!gr->sm_to_cluster)
 		goto cleanup;
 
@@ -650,10 +653,10 @@ static int vgpu_gr_init_gr_config(struct gk20a *g, struct gr_gk20a *gr)
 cleanup:
 	gk20a_err(dev_from_gk20a(g), "%s: out of memory", __func__);
 
-	kfree(gr->gpc_tpc_count);
+	nvgpu_kfree(g, gr->gpc_tpc_count);
 	gr->gpc_tpc_count = NULL;
 
-	kfree(gr->gpc_tpc_mask);
+	nvgpu_kfree(g, gr->gpc_tpc_mask);
 	gr->gpc_tpc_mask = NULL;
 
 	return -ENOMEM;
@@ -838,16 +841,16 @@ static void vgpu_remove_gr_support(struct gr_gk20a *gr)
 
 	gk20a_comptag_allocator_destroy(&gr->comp_tags);
 
-	kfree(gr->sm_error_states);
+	nvgpu_kfree(gr->g, gr->sm_error_states);
 	gr->sm_error_states = NULL;
 
-	kfree(gr->gpc_tpc_mask);
+	nvgpu_kfree(gr->g, gr->gpc_tpc_mask);
 	gr->gpc_tpc_mask = NULL;
 
-	kfree(gr->sm_to_cluster);
+	nvgpu_kfree(gr->g, gr->sm_to_cluster);
 	gr->sm_to_cluster = NULL;
 
-	kfree(gr->gpc_tpc_count);
+	nvgpu_kfree(gr->g, gr->gpc_tpc_count);
 	gr->gpc_tpc_count = NULL;
 }
 
@@ -887,9 +890,9 @@ static int vgpu_gr_init_gr_setup_sw(struct gk20a *g)
 
 	nvgpu_mutex_init(&gr->ctx_mutex);
 
-	gr->sm_error_states = kzalloc(
-			sizeof(struct nvgpu_dbg_gpu_sm_error_state_record) *
-			gr->no_of_sm, GFP_KERNEL);
+	gr->sm_error_states = nvgpu_kzalloc(g,
+			sizeof(struct nvgpu_dbg_gpu_sm_error_state_record) *
+			gr->no_of_sm);
 	if (!gr->sm_error_states) {
 		err = -ENOMEM;
 		goto clean_up;