diff options
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/fifo_gk20a.c')
-rw-r--r-- | drivers/gpu/nvgpu/gk20a/fifo_gk20a.c | 16 |
1 files changed, 8 insertions, 8 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c1f94eb3..ca09c22a 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -483,7 +483,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
483 | for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) { | 483 | for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) { |
484 | runlist = &f->runlist_info[runlist_id]; | 484 | runlist = &f->runlist_info[runlist_id]; |
485 | for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { | 485 | for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { |
486 | gk20a_gmmu_free(g, &runlist->mem[i]); | 486 | nvgpu_dma_free(g, &runlist->mem[i]); |
487 | } | 487 | } |
488 | 488 | ||
489 | nvgpu_kfree(g, runlist->active_channels); | 489 | nvgpu_kfree(g, runlist->active_channels); |
@@ -544,9 +544,9 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
544 | nvgpu_vfree(g, f->channel); | 544 | nvgpu_vfree(g, f->channel); |
545 | nvgpu_vfree(g, f->tsg); | 545 | nvgpu_vfree(g, f->tsg); |
546 | if (g->ops.mm.is_bar1_supported(g)) | 546 | if (g->ops.mm.is_bar1_supported(g)) |
547 | gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd); | 547 | nvgpu_dma_unmap_free(&g->mm.bar1.vm, &f->userd); |
548 | else | 548 | else |
549 | gk20a_gmmu_free(g, &f->userd); | 549 | nvgpu_dma_free(g, &f->userd); |
550 | 550 | ||
551 | gk20a_fifo_delete_runlist(f); | 551 | gk20a_fifo_delete_runlist(f); |
552 | 552 | ||
@@ -686,7 +686,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
686 | f->num_runlist_entries, runlist_size); | 686 | f->num_runlist_entries, runlist_size); |
687 | 687 | ||
688 | for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { | 688 | for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) { |
689 | int err = gk20a_gmmu_alloc_sys(g, runlist_size, | 689 | int err = nvgpu_dma_alloc_sys(g, runlist_size, |
690 | &runlist->mem[i]); | 690 | &runlist->mem[i]); |
691 | if (err) { | 691 | if (err) { |
692 | dev_err(d, "memory allocation failed\n"); | 692 | dev_err(d, "memory allocation failed\n"); |
@@ -940,12 +940,12 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
940 | nvgpu_mutex_init(&f->free_chs_mutex); | 940 | nvgpu_mutex_init(&f->free_chs_mutex); |
941 | 941 | ||
942 | if (g->ops.mm.is_bar1_supported(g)) | 942 | if (g->ops.mm.is_bar1_supported(g)) |
943 | err = gk20a_gmmu_alloc_map_sys(&g->mm.bar1.vm, | 943 | err = nvgpu_dma_alloc_map_sys(&g->mm.bar1.vm, |
944 | f->userd_entry_size * f->num_channels, | 944 | f->userd_entry_size * f->num_channels, |
945 | &f->userd); | 945 | &f->userd); |
946 | 946 | ||
947 | else | 947 | else |
948 | err = gk20a_gmmu_alloc_sys(g, f->userd_entry_size * | 948 | err = nvgpu_dma_alloc_sys(g, f->userd_entry_size * |
949 | f->num_channels, &f->userd); | 949 | f->num_channels, &f->userd); |
950 | if (err) { | 950 | if (err) { |
951 | dev_err(d, "userd memory allocation failed\n"); | 951 | dev_err(d, "userd memory allocation failed\n"); |
@@ -980,9 +980,9 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
980 | clean_up: | 980 | clean_up: |
981 | gk20a_dbg_fn("fail"); | 981 | gk20a_dbg_fn("fail"); |
982 | if (g->ops.mm.is_bar1_supported(g)) | 982 | if (g->ops.mm.is_bar1_supported(g)) |
983 | gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd); | 983 | nvgpu_dma_unmap_free(&g->mm.bar1.vm, &f->userd); |
984 | else | 984 | else |
985 | gk20a_gmmu_free(g, &f->userd); | 985 | nvgpu_dma_free(g, &f->userd); |
986 | 986 | ||
987 | nvgpu_vfree(g, f->channel); | 987 | nvgpu_vfree(g, f->channel); |
988 | f->channel = NULL; | 988 | f->channel = NULL; |