author     Alex Waterman <alexw@nvidia.com>                     2017-03-21 18:34:50 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>  2017-04-06 21:15:04 -0400
commit     50667e097b2be567e3d2f95e23b046243bca2bf6 (patch)
tree       e8fc42261868c6d69844f2e92fce33f6169434d4 /drivers/gpu/nvgpu/gk20a
parent     8f2d4a3f4a0acc81bae6725d30506e92651a42b5 (diff)
gpu: nvgpu: Rename nvgpu DMA APIs
Rename the nvgpu DMA APIs from gk20a_gmmu_alloc* to nvgpu_dma_alloc*.
This better reflects the purpose of the APIs (to allocate DMA-suitable
memory) and avoids confusion with GMMU-related code.
JIRA NVGPU-12
Change-Id: I673d607db56dd6e44f02008dc7b5293209ef67bf
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: http://git-master/r/1325548
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
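
The rename is mechanical: each helper keeps its arguments and semantics and only trades the gk20a_gmmu_ prefix for nvgpu_dma_. A minimal sketch of two renamed pairs, with the old names in comments; dma_rename_example is a hypothetical function written for illustration, not code from this patch, and assumes the caller holds a live device handle and VM:

	/* Sketch only: g and vm are assumed to be already initialized. */
	static int dma_rename_example(struct gk20a *g, struct vm_gk20a *vm)
	{
		struct nvgpu_mem mem;
		int err;

		/* Plain sysmem allocation, paired with the matching free. */
		err = nvgpu_dma_alloc_sys(g, SZ_4K, &mem);      /* was gk20a_gmmu_alloc_sys()  */
		if (!err)
			nvgpu_dma_free(g, &mem);                /* was gk20a_gmmu_free()       */

		/* Sysmem allocation that is also mapped into the GPU VM. */
		err = nvgpu_dma_alloc_map_sys(vm, SZ_4K, &mem); /* was gk20a_gmmu_alloc_map_sys() */
		if (!err)
			nvgpu_dma_unmap_free(vm, &mem);         /* was gk20a_gmmu_unmap_free()    */

		return err;
	}

The _flags, _vid, and _map_vid variants follow the same pattern (e.g. gk20a_gmmu_alloc_flags_sys() becomes nvgpu_dma_alloc_flags_sys()), as the hunks below show.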
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/cde_gk20a.c         |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ce2_gk20a.c         |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c     | 10
-rw-r--r--  drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c      |  6
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c  |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/fifo_gk20a.c        | 16
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c          | 40
-rw-r--r--  drivers/gpu/nvgpu/gk20a/ltc_common.c        |  4
-rw-r--r--  drivers/gpu/nvgpu/gk20a/mm_gk20a.c          | 18
-rw-r--r--  drivers/gpu/nvgpu/gk20a/pmu_gk20a.c         | 14
10 files changed, 60 insertions, 60 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
index e70ee4a6..7c251e2d 100644
--- a/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/cde_gk20a.c
@@ -54,7 +54,7 @@ static void gk20a_deinit_cde_img(struct gk20a_cde_ctx *cde_ctx)
 
 	for (i = 0; i < cde_ctx->num_bufs; i++) {
 		struct nvgpu_mem *mem = cde_ctx->mem + i;
-		gk20a_gmmu_unmap_free(cde_ctx->vm, mem);
+		nvgpu_dma_unmap_free(cde_ctx->vm, mem);
 	}
 
 	nvgpu_kfree(cde_ctx->g, cde_ctx->init_convert_cmd);
@@ -247,7 +247,7 @@ static int gk20a_init_cde_buf(struct gk20a_cde_ctx *cde_ctx,
 
 	/* allocate buf */
 	mem = cde_ctx->mem + cde_ctx->num_bufs;
-	err = gk20a_gmmu_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
+	err = nvgpu_dma_alloc_map_sys(cde_ctx->vm, buf->num_bytes, mem);
 	if (err) {
 		gk20a_warn(cde_ctx->dev, "cde: could not allocate device memory. buffer idx = %d",
 			   cde_ctx->num_bufs);
diff --git a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
index 9cc4b678..f3ac28ea 100644
--- a/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/ce2_gk20a.c
@@ -195,7 +195,7 @@ static void gk20a_ce_delete_gpu_context(struct gk20a_gpu_ctx *ce_ctx)
 
 	if (ce_ctx->cmd_buf_mem.cpu_va) {
 		gk20a_ce_free_command_buffer_stored_fence(ce_ctx);
-		gk20a_gmmu_unmap_free(ce_ctx->vm, &ce_ctx->cmd_buf_mem);
+		nvgpu_dma_unmap_free(ce_ctx->vm, &ce_ctx->cmd_buf_mem);
 	}
 
 	/* free the channel */
@@ -479,7 +479,7 @@ u32 gk20a_ce_create_context_with_cb(struct device *dev,
 	}
 
 	/* allocate command buffer (4096 should be more than enough) from sysmem*/
-	err = gk20a_gmmu_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem);
+	err = nvgpu_dma_alloc_map_sys(ce_ctx->vm, NVGPU_CE_COMMAND_BUF_SIZE, &ce_ctx->cmd_buf_mem);
 	if (err) {
 		gk20a_err(ce_ctx->dev,
 			"ce: could not allocate command buffer for CE context");
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 6be616b3..81901c52 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -523,7 +523,7 @@ static void gk20a_free_channel(struct channel_gk20a *ch, bool force)
 
 	gk20a_gr_flush_channel_tlb(gr);
 
-	gk20a_gmmu_unmap_free(ch_vm, &ch->gpfifo.mem);
+	nvgpu_dma_unmap_free(ch_vm, &ch->gpfifo.mem);
 	nvgpu_big_free(g, ch->gpfifo.pipe);
 	memset(&ch->gpfifo, 0, sizeof(struct gpfifo_desc));
 
@@ -899,7 +899,7 @@ static int channel_gk20a_alloc_priv_cmdbuf(struct channel_gk20a *c)
 	size = roundup_pow_of_two(c->gpfifo.entry_num *
 			2 * 18 * sizeof(u32) / 3);
 
-	err = gk20a_gmmu_alloc_map_sys(ch_vm, size, &q->mem);
+	err = nvgpu_dma_alloc_map_sys(ch_vm, size, &q->mem);
 	if (err) {
 		gk20a_err(d, "%s: memory allocation failed\n", __func__);
 		goto clean_up;
@@ -922,7 +922,7 @@ static void channel_gk20a_free_priv_cmdbuf(struct channel_gk20a *c)
 	if (q->size == 0)
 		return;
 
-	gk20a_gmmu_unmap_free(ch_vm, &q->mem);
+	nvgpu_dma_unmap_free(ch_vm, &q->mem);
 
 	memset(q, 0, sizeof(struct priv_cmd_queue));
 }
@@ -1244,7 +1244,7 @@ int gk20a_channel_alloc_gpfifo(struct channel_gk20a *c,
 		return -EEXIST;
 	}
 
-	err = gk20a_gmmu_alloc_map_sys(ch_vm,
+	err = nvgpu_dma_alloc_map_sys(ch_vm,
 			gpfifo_size * sizeof(struct nvgpu_gpfifo),
 			&c->gpfifo.mem);
 	if (err) {
@@ -1331,7 +1331,7 @@ clean_up_sync:
 	}
clean_up_unmap:
 	nvgpu_big_free(g, c->gpfifo.pipe);
-	gk20a_gmmu_unmap_free(ch_vm, &c->gpfifo.mem);
+	nvgpu_dma_unmap_free(ch_vm, &c->gpfifo.mem);
 clean_up:
 	memset(&c->gpfifo, 0, sizeof(struct gpfifo_desc));
 	gk20a_err(d, "fail");
diff --git a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
index 738e8c1c..e5910e7f 100644
--- a/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/css_gr_gk20a.c
@@ -143,7 +143,7 @@ static int css_hw_enable_snapshot(struct channel_gk20a *ch,
 	if (snapshot_size < CSS_MIN_HW_SNAPSHOT_SIZE)
 		snapshot_size = CSS_MIN_HW_SNAPSHOT_SIZE;
 
-	ret = gk20a_gmmu_alloc_map_sys(&g->mm.pmu.vm, snapshot_size,
+	ret = nvgpu_dma_alloc_map_sys(&g->mm.pmu.vm, snapshot_size,
 			&data->hw_memdesc);
 	if (ret)
 		return ret;
@@ -192,7 +192,7 @@ static int css_hw_enable_snapshot(struct channel_gk20a *ch,
 
 failed_allocation:
 	if (data->hw_memdesc.size) {
-		gk20a_gmmu_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
+		nvgpu_dma_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
 		memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	}
 	data->hw_snapshot = NULL;
@@ -220,7 +220,7 @@ static void css_hw_disable_snapshot(struct gr_gk20a *gr)
 			perf_pmasys_mem_block_valid_false_f() |
 			perf_pmasys_mem_block_target_f(0));
 
-	gk20a_gmmu_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
+	nvgpu_dma_unmap_free(&g->mm.pmu.vm, &data->hw_memdesc);
 	memset(&data->hw_memdesc, 0, sizeof(data->hw_memdesc));
 	data->hw_snapshot = NULL;
 
diff --git a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
index d8fa7505..96b94ea7 100644
--- a/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fecs_trace_gk20a.c
@@ -400,7 +400,7 @@ static int gk20a_fecs_trace_alloc_ring(struct gk20a *g)
 {
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-	return gk20a_gmmu_alloc_sys(g, GK20A_FECS_TRACE_NUM_RECORDS
+	return nvgpu_dma_alloc_sys(g, GK20A_FECS_TRACE_NUM_RECORDS
 			* ctxsw_prog_record_timestamp_record_size_in_bytes_v(),
 			&trace->trace_buf);
 }
@@ -409,7 +409,7 @@ static void gk20a_fecs_trace_free_ring(struct gk20a *g)
 {
 	struct gk20a_fecs_trace *trace = g->fecs_trace;
 
-	gk20a_gmmu_free(g, &trace->trace_buf);
+	nvgpu_dma_free(g, &trace->trace_buf);
 }
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
index c1f94eb3..ca09c22a 100644
--- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.c
@@ -483,7 +483,7 @@ void gk20a_fifo_delete_runlist(struct fifo_gk20a *f)
 	for (runlist_id = 0; runlist_id < f->max_runlists; runlist_id++) {
 		runlist = &f->runlist_info[runlist_id];
 		for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-			gk20a_gmmu_free(g, &runlist->mem[i]);
+			nvgpu_dma_free(g, &runlist->mem[i]);
 		}
 
 		nvgpu_kfree(g, runlist->active_channels);
@@ -544,9 +544,9 @@ static void gk20a_remove_fifo_support(struct fifo_gk20a *f)
 	nvgpu_vfree(g, f->channel);
 	nvgpu_vfree(g, f->tsg);
 	if (g->ops.mm.is_bar1_supported(g))
-		gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
+		nvgpu_dma_unmap_free(&g->mm.bar1.vm, &f->userd);
 	else
-		gk20a_gmmu_free(g, &f->userd);
+		nvgpu_dma_free(g, &f->userd);
 
 	gk20a_fifo_delete_runlist(f);
 
@@ -686,7 +686,7 @@ static int init_runlist(struct gk20a *g, struct fifo_gk20a *f)
 		f->num_runlist_entries, runlist_size);
 
 	for (i = 0; i < MAX_RUNLIST_BUFFERS; i++) {
-		int err = gk20a_gmmu_alloc_sys(g, runlist_size,
+		int err = nvgpu_dma_alloc_sys(g, runlist_size,
 				&runlist->mem[i]);
 		if (err) {
 			dev_err(d, "memory allocation failed\n");
@@ -940,12 +940,12 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 	nvgpu_mutex_init(&f->free_chs_mutex);
 
 	if (g->ops.mm.is_bar1_supported(g))
-		err = gk20a_gmmu_alloc_map_sys(&g->mm.bar1.vm,
+		err = nvgpu_dma_alloc_map_sys(&g->mm.bar1.vm,
 				f->userd_entry_size * f->num_channels,
 				&f->userd);
 
 	else
-		err = gk20a_gmmu_alloc_sys(g, f->userd_entry_size *
+		err = nvgpu_dma_alloc_sys(g, f->userd_entry_size *
 				f->num_channels, &f->userd);
 	if (err) {
 		dev_err(d, "userd memory allocation failed\n");
@@ -980,9 +980,9 @@ static int gk20a_init_fifo_setup_sw(struct gk20a *g)
 clean_up:
 	gk20a_dbg_fn("fail");
 	if (g->ops.mm.is_bar1_supported(g))
-		gk20a_gmmu_unmap_free(&g->mm.bar1.vm, &f->userd);
+		nvgpu_dma_unmap_free(&g->mm.bar1.vm, &f->userd);
 	else
-		gk20a_gmmu_free(g, &f->userd);
+		nvgpu_dma_free(g, &f->userd);
 
 	nvgpu_vfree(g, f->channel);
 	f->channel = NULL;
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index a9b6a546..af02491e 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1938,7 +1938,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 	if (enable_hwpm_ctxsw) {
 		/* Allocate buffer if necessary */
 		if (pm_ctx->mem.gpu_va == 0) {
-			ret = gk20a_gmmu_alloc_flags_sys(g,
+			ret = nvgpu_dma_alloc_flags_sys(g,
 					NVGPU_DMA_NO_KERNEL_MAPPING,
 					g->gr.ctx_vars.pm_ctxsw_image_size,
 					&pm_ctx->mem);
@@ -1958,7 +1958,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 			if (!pm_ctx->mem.gpu_va) {
 				gk20a_err(dev_from_gk20a(g),
 					"failed to map pm ctxt buffer");
-				gk20a_gmmu_free(g, &pm_ctx->mem);
+				nvgpu_dma_free(g, &pm_ctx->mem);
 				c->g->ops.fifo.enable_channel(c);
 				return -ENOMEM;
 			}
@@ -2018,7 +2018,7 @@ clean_up_mem:
 cleanup_pm_buf:
 	gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va, pm_ctx->mem.size,
 			gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &pm_ctx->mem);
+	nvgpu_dma_free(g, &pm_ctx->mem);
 	memset(&pm_ctx->mem, 0, sizeof(struct nvgpu_mem));
 
 	gk20a_enable_channel_tsg(g, c);
@@ -2318,7 +2318,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 			g->gr.ctx_vars.ucode.gpccs.inst.count * sizeof(u32),
 			g->gr.ctx_vars.ucode.gpccs.data.count * sizeof(u32));
 
-	err = gk20a_gmmu_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
+	err = nvgpu_dma_alloc_sys(g, ucode_size, &ucode_info->surface_desc);
 	if (err)
 		goto clean_up;
 
@@ -2350,7 +2350,7 @@ int gr_gk20a_init_ctxsw_ucode(struct gk20a *g)
 	if (ucode_info->surface_desc.gpu_va)
 		gk20a_gmmu_unmap(vm, ucode_info->surface_desc.gpu_va,
 			ucode_info->surface_desc.size, gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &ucode_info->surface_desc);
+	nvgpu_dma_free(g, &ucode_info->surface_desc);
 
 	release_firmware(gpccs_fw);
 	gpccs_fw = NULL;
@@ -2700,7 +2700,7 @@ static void gk20a_gr_destroy_ctx_buffer(struct gk20a *g,
 {
 	if (!desc)
 		return;
-	gk20a_gmmu_free(g, &desc->mem);
+	nvgpu_dma_free(g, &desc->mem);
 	desc->destroy = NULL;
 }
 
@@ -2710,7 +2710,7 @@ static int gk20a_gr_alloc_ctx_buffer(struct gk20a *g,
 {
 	int err = 0;
 
-	err = gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 			size, &desc->mem);
 	if (err)
 		return err;
@@ -2953,7 +2953,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	if (!gr_ctx)
 		return -ENOMEM;
 
-	err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 			gr->ctx_vars.buffer_total_size,
 			&gr_ctx->mem);
 	if (err)
@@ -2973,7 +2973,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	return 0;
 
 err_free_mem:
-	gk20a_gmmu_free(g, &gr_ctx->mem);
+	nvgpu_dma_free(g, &gr_ctx->mem);
 err_free_ctx:
 	nvgpu_kfree(g, gr_ctx);
 	gr_ctx = NULL;
@@ -3022,7 +3022,7 @@ void gr_gk20a_free_gr_ctx(struct gk20a *g,
 
 	gk20a_gmmu_unmap(vm, gr_ctx->mem.gpu_va,
 		gr_ctx->mem.size, gk20a_mem_flag_none);
-	gk20a_gmmu_free(g, &gr_ctx->mem);
+	nvgpu_dma_free(g, &gr_ctx->mem);
 	nvgpu_kfree(g, gr_ctx);
 }
 
@@ -3051,7 +3051,7 @@ static int gr_gk20a_alloc_channel_patch_ctx(struct gk20a *g,
 
 	gk20a_dbg_fn("");
 
-	err = gk20a_gmmu_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
+	err = nvgpu_dma_alloc_map_flags_sys(ch_vm, NVGPU_DMA_NO_KERNEL_MAPPING,
 			128 * sizeof(u32), &patch_ctx->mem);
 	if (err)
 		return err;
@@ -3071,7 +3071,7 @@ static void gr_gk20a_free_channel_patch_ctx(struct channel_gk20a *c)
 	gk20a_gmmu_unmap(c->vm, patch_ctx->mem.gpu_va,
 			patch_ctx->mem.size, gk20a_mem_flag_none);
 
-	gk20a_gmmu_free(g, &patch_ctx->mem);
+	nvgpu_dma_free(g, &patch_ctx->mem);
 	patch_ctx->data_count = 0;
 }
 
@@ -3086,7 +3086,7 @@ static void gr_gk20a_free_channel_pm_ctx(struct channel_gk20a *c)
 		gk20a_gmmu_unmap(c->vm, pm_ctx->mem.gpu_va,
 			pm_ctx->mem.size, gk20a_mem_flag_none);
 
-		gk20a_gmmu_free(g, &pm_ctx->mem);
+		nvgpu_dma_free(g, &pm_ctx->mem);
 	}
 }
 
@@ -3366,10 +3366,10 @@ static void gk20a_remove_gr_support(struct gr_gk20a *gr)
 
 	gr_gk20a_free_global_ctx_buffers(g);
 
-	gk20a_gmmu_free(g, &gr->mmu_wr_mem);
-	gk20a_gmmu_free(g, &gr->mmu_rd_mem);
+	nvgpu_dma_free(g, &gr->mmu_wr_mem);
+	nvgpu_dma_free(g, &gr->mmu_rd_mem);
 
-	gk20a_gmmu_free(g, &gr->compbit_store.mem);
+	nvgpu_dma_free(g, &gr->compbit_store.mem);
 
 	memset(&gr->compbit_store, 0, sizeof(struct compbit_store_desc));
 
@@ -3658,17 +3658,17 @@ static int gr_gk20a_init_mmu_sw(struct gk20a *g, struct gr_gk20a *gr)
 {
 	int err;
 
-	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
+	err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_wr_mem);
 	if (err)
 		goto err;
 
-	err = gk20a_gmmu_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
+	err = nvgpu_dma_alloc_sys(g, 0x1000, &gr->mmu_rd_mem);
 	if (err)
 		goto err_free_wr_mem;
 	return 0;
 
 err_free_wr_mem:
-	gk20a_gmmu_free(g, &gr->mmu_wr_mem);
+	nvgpu_dma_free(g, &gr->mmu_wr_mem);
 err:
 	return -ENOMEM;
 }
@@ -5215,7 +5215,7 @@ static int gk20a_init_gr_bind_fecs_elpg(struct gk20a *g)
 	}
 
 	if (!pmu->pg_buf.cpu_va) {
-		err = gk20a_gmmu_alloc_map_sys(vm, size, &pmu->pg_buf);
+		err = nvgpu_dma_alloc_map_sys(vm, size, &pmu->pg_buf);
 		if (err) {
 			gk20a_err(d, "failed to allocate memory\n");
 			return -ENOMEM;
diff --git a/drivers/gpu/nvgpu/gk20a/ltc_common.c b/drivers/gpu/nvgpu/gk20a/ltc_common.c
index 7c73be77..03b12740 100644
--- a/drivers/gpu/nvgpu/gk20a/ltc_common.c
+++ b/drivers/gpu/nvgpu/gk20a/ltc_common.c
@@ -70,7 +70,7 @@ static int gk20a_ltc_alloc_phys_cbc(struct gk20a *g,
 {
 	struct gr_gk20a *gr = &g->gr;
 
-	return gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
+	return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_FORCE_CONTIGUOUS,
 				compbit_backing_size,
 				&gr->compbit_store.mem);
 }
@@ -80,7 +80,7 @@ static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
 {
 	struct gr_gk20a *gr = &g->gr;
 
-	return gk20a_gmmu_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+	return nvgpu_dma_alloc_flags_sys(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 				compbit_backing_size,
 				&gr->compbit_store.mem);
 }
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index 79654af3..cfe7745d 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -509,7 +509,7 @@ static void gk20a_remove_mm_support(struct mm_gk20a *mm)
 
 static int gk20a_alloc_sysmem_flush(struct gk20a *g)
 {
-	return gk20a_gmmu_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
+	return nvgpu_dma_alloc_sys(g, SZ_4K, &g->mm.sysmem_flush);
 }
 
 #if defined(CONFIG_GK20A_VIDMEM)
@@ -897,9 +897,9 @@ static int alloc_gmmu_pages(struct vm_gk20a *vm, u32 order,
 	 * default.
 	 */
 	if (IS_ENABLED(CONFIG_ARM64))
-		err = gk20a_gmmu_alloc(g, len, &entry->mem);
+		err = nvgpu_dma_alloc(g, len, &entry->mem);
 	else
-		err = gk20a_gmmu_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
+		err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_NO_KERNEL_MAPPING,
 				len, &entry->mem);
 
 
@@ -929,7 +929,7 @@ void free_gmmu_pages(struct vm_gk20a *vm,
 		return;
 	}
 
-	gk20a_gmmu_free(g, &entry->mem);
+	nvgpu_dma_free(g, &entry->mem);
 }
 
 int map_gmmu_pages(struct gk20a *g, struct gk20a_mm_entry *entry)
@@ -1756,7 +1756,7 @@ static void gk20a_vidbuf_release(struct dma_buf *dmabuf)
 	if (buf->dmabuf_priv)
 		buf->dmabuf_priv_delete(buf->dmabuf_priv);
 
-	gk20a_gmmu_free(buf->g, buf->mem);
+	nvgpu_dma_free(buf->g, buf->mem);
 	nvgpu_kfree(buf->g, buf);
 }
 
@@ -1873,7 +1873,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 
 	buf->mem->user_mem = true;
 
-	err = gk20a_gmmu_alloc_vid(g, bytes, buf->mem);
+	err = nvgpu_dma_alloc_vid(g, bytes, buf->mem);
 	if (err)
 		goto err_memfree;
 
@@ -1896,7 +1896,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 	return fd;
 
 err_bfree:
-	gk20a_gmmu_free(g, buf->mem);
+	nvgpu_dma_free(g, buf->mem);
 err_memfree:
 	nvgpu_kfree(g, buf->mem);
 err_kfree:
@@ -4199,7 +4199,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 
 	gk20a_dbg_fn("");
 
-	err = gk20a_gmmu_alloc(g, ram_in_alloc_size_v(), inst_block);
+	err = nvgpu_dma_alloc(g, ram_in_alloc_size_v(), inst_block);
 	if (err) {
 		gk20a_err(dev, "%s: memory allocation failed\n", __func__);
 		return err;
@@ -4212,7 +4212,7 @@ int gk20a_alloc_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 void gk20a_free_inst_block(struct gk20a *g, struct nvgpu_mem *inst_block)
 {
 	if (inst_block->size)
-		gk20a_gmmu_free(g, inst_block);
+		nvgpu_dma_free(g, inst_block);
 }
 
 u64 gk20a_mm_inst_block_addr(struct gk20a *g, struct nvgpu_mem *inst_block)
diff --git a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
index 7a6bfe22..547ba924 100644
--- a/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/pmu_gk20a.c
@@ -3151,7 +3151,7 @@ static int gk20a_prepare_ucode(struct gk20a *g)
 	pmu->ucode_image = (u32 *)((u8 *)pmu->desc +
 			pmu->desc->descriptor_size);
 
-	err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
+	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_UCODE_SIZE_MAX,
 			&pmu->ucode);
 	if (err)
 		goto err_release_fw;
@@ -3225,7 +3225,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
 	INIT_WORK(&pmu->pg_init, pmu_setup_hw);
 
-	err = gk20a_gmmu_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
+	err = nvgpu_dma_alloc_map_sys(vm, GK20A_PMU_SEQ_BUF_SIZE,
 			&pmu->seq_buf);
 	if (err) {
 		gk20a_err(d, "failed to allocate memory\n");
@@ -3242,7 +3242,7 @@ static int gk20a_init_pmu_setup_sw(struct gk20a *g)
 
 	pmu->seq_buf.size = GK20A_PMU_SEQ_BUF_SIZE;
 
-	err = gk20a_gmmu_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
+	err = nvgpu_dma_alloc_map(vm, GK20A_PMU_TRACE_BUFSIZE,
 			&pmu->trace_buf);
 	if (err) {
 		gk20a_err(d, "failed to allocate pmu trace buffer\n");
@@ -3255,7 +3255,7 @@ skip_init:
 	gk20a_dbg_fn("done");
 	return 0;
 err_free_seq_buf:
-	gk20a_gmmu_unmap_free(vm, &pmu->seq_buf);
+	nvgpu_dma_unmap_free(vm, &pmu->seq_buf);
 err_free_seq:
 	nvgpu_kfree(g, pmu->seq);
 err_free_mutex:
@@ -4760,7 +4760,7 @@ int gk20a_pmu_vidmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	int err;
 
-	err = gk20a_gmmu_alloc_map_vid(vm, size, mem);
+	err = nvgpu_dma_alloc_map_vid(vm, size, mem);
 	if (err) {
 		gk20a_err(g->dev, "memory allocation failed");
 		return -ENOMEM;
@@ -4776,7 +4776,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 	struct vm_gk20a *vm = &mm->pmu.vm;
 	int err;
 
-	err = gk20a_gmmu_alloc_map_sys(vm, size, mem);
+	err = nvgpu_dma_alloc_map_sys(vm, size, mem);
 	if (err) {
 		gk20a_err(g->dev, "failed to allocate memory\n");
 		return -ENOMEM;
@@ -4787,7 +4787,7 @@ int gk20a_pmu_sysmem_surface_alloc(struct gk20a *g, struct nvgpu_mem *mem,
 
 void gk20a_pmu_surface_free(struct gk20a *g, struct nvgpu_mem *mem)
 {
-	gk20a_gmmu_free(g, mem);
+	nvgpu_dma_free(g, mem);
 	memset(mem, 0, sizeof(struct nvgpu_mem));
 }
 