diff options
Diffstat (limited to 'drivers/gpu/nvgpu')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/cde.c          |  2
-rw-r--r--  drivers/gpu/nvgpu/common/linux/ioctl_as.c     | 27
-rw-r--r--  drivers/gpu/nvgpu/common/mm/gmmu.c            |  2
-rw-r--r--  drivers/gpu/nvgpu/gk20a/gr_gk20a.c            | 10
-rw-r--r--  drivers/gpu/nvgpu/gp10b/gr_gp10b.c            |  2
-rw-r--r--  drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c  |  3
-rw-r--r--  drivers/gpu/nvgpu/vgpu/mm_vgpu.c              |  3
7 files changed, 15 insertions, 34 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c index 775f9657..5063ba88 100644 --- a/drivers/gpu/nvgpu/common/linux/cde.c +++ b/drivers/gpu/nvgpu/common/linux/cde.c | |||
@@ -1279,7 +1279,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx) | |||
1279 | /* map backing store to gpu virtual space */ | 1279 | /* map backing store to gpu virtual space */ |
1280 | vaddr = nvgpu_gmmu_map(ch->vm, &gr->compbit_store.mem, | 1280 | vaddr = nvgpu_gmmu_map(ch->vm, &gr->compbit_store.mem, |
1281 | g->gr.compbit_store.mem.size, | 1281 | g->gr.compbit_store.mem.size, |
1282 | NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, | 1282 | NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, |
1283 | gk20a_mem_flag_read_only, | 1283 | gk20a_mem_flag_read_only, |
1284 | false, | 1284 | false, |
1285 | gr->compbit_store.mem.aperture); | 1285 | gr->compbit_store.mem.aperture); |
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c index e566bfb4..8a5318e4 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c | |||
@@ -88,8 +88,8 @@ static int gk20a_as_ioctl_map_buffer_ex( | |||
88 | compressible_kind = args->compr_kind; | 88 | compressible_kind = args->compr_kind; |
89 | incompressible_kind = args->incompr_kind; | 89 | incompressible_kind = args->incompr_kind; |
90 | } else { | 90 | } else { |
91 | compressible_kind = args->kind; | 91 | /* unsupported, direct kind control must be used */ |
92 | incompressible_kind = NV_KIND_INVALID; | 92 | return -EINVAL; |
93 | } | 93 | } |
94 | 94 | ||
95 | return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd, | 95 | return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd, |
@@ -100,19 +100,6 @@ static int gk20a_as_ioctl_map_buffer_ex( | |||
100 | NULL); | 100 | NULL); |
101 | } | 101 | } |
102 | 102 | ||
103 | static int gk20a_as_ioctl_map_buffer( | ||
104 | struct gk20a_as_share *as_share, | ||
105 | struct nvgpu_as_map_buffer_args *args) | ||
106 | { | ||
107 | gk20a_dbg_fn(""); | ||
108 | return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd, | ||
109 | &args->o_a.offset, | ||
110 | args->flags, NV_KIND_DEFAULT, | ||
111 | NV_KIND_DEFAULT, | ||
112 | 0, 0, NULL); | ||
113 | /* args->o_a.offset will be set if !err */ | ||
114 | } | ||
115 | |||
116 | static int gk20a_as_ioctl_unmap_buffer( | 103 | static int gk20a_as_ioctl_unmap_buffer( |
117 | struct gk20a_as_share *as_share, | 104 | struct gk20a_as_share *as_share, |
118 | struct nvgpu_as_unmap_buffer_args *args) | 105 | struct nvgpu_as_unmap_buffer_args *args) |
@@ -187,8 +174,9 @@ static int gk20a_as_ioctl_map_buffer_batch( | |||
187 | compressible_kind = map_args.compr_kind; | 174 | compressible_kind = map_args.compr_kind; |
188 | incompressible_kind = map_args.incompr_kind; | 175 | incompressible_kind = map_args.incompr_kind; |
189 | } else { | 176 | } else { |
190 | compressible_kind = map_args.kind; | 177 | /* direct kind control must be used */ |
191 | incompressible_kind = NV_KIND_INVALID; | 178 | err = -EINVAL; |
179 | break; | ||
192 | } | 180 | } |
193 | 181 | ||
194 | err = nvgpu_vm_map_buffer( | 182 | err = nvgpu_vm_map_buffer( |
@@ -348,11 +336,6 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
348 | err = gk20a_as_ioctl_free_space(as_share, | 336 | err = gk20a_as_ioctl_free_space(as_share, |
349 | (struct nvgpu_as_free_space_args *)buf); | 337 | (struct nvgpu_as_free_space_args *)buf); |
350 | break; | 338 | break; |
351 | case NVGPU_AS_IOCTL_MAP_BUFFER: | ||
352 | trace_gk20a_as_ioctl_map_buffer(g->name); | ||
353 | err = gk20a_as_ioctl_map_buffer(as_share, | ||
354 | (struct nvgpu_as_map_buffer_args *)buf); | ||
355 | break; | ||
356 | case NVGPU_AS_IOCTL_MAP_BUFFER_EX: | 339 | case NVGPU_AS_IOCTL_MAP_BUFFER_EX: |
357 | trace_gk20a_as_ioctl_map_buffer(g->name); | 340 | trace_gk20a_as_ioctl_map_buffer(g->name); |
358 | err = gk20a_as_ioctl_map_buffer_ex(as_share, | 341 | err = gk20a_as_ioctl_map_buffer_ex(as_share, |
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c index d6aaf8cd..875bcc4e 100644 --- a/drivers/gpu/nvgpu/common/mm/gmmu.c +++ b/drivers/gpu/nvgpu/common/mm/gmmu.c | |||
@@ -680,7 +680,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm, | |||
680 | .pgsz = pgsz_idx, | 680 | .pgsz = pgsz_idx, |
681 | .kind_v = kind_v, | 681 | .kind_v = kind_v, |
682 | .ctag = (u64)ctag_offset * (u64)ctag_granularity, | 682 | .ctag = (u64)ctag_offset * (u64)ctag_granularity, |
683 | .cacheable = flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, | 683 | .cacheable = flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, |
684 | .rw_flag = rw_flag, | 684 | .rw_flag = rw_flag, |
685 | .sparse = sparse, | 685 | .sparse = sparse, |
686 | .priv = priv, | 686 | .priv = priv, |
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c index 3875ec5c..d4d6cd2d 100644 --- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c | |||
@@ -1738,7 +1738,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g, | |||
1738 | pm_ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm, | 1738 | pm_ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm, |
1739 | &pm_ctx->mem, | 1739 | &pm_ctx->mem, |
1740 | pm_ctx->mem.size, | 1740 | pm_ctx->mem.size, |
1741 | NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, | 1741 | NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, |
1742 | gk20a_mem_flag_none, true, | 1742 | gk20a_mem_flag_none, true, |
1743 | pm_ctx->mem.aperture); | 1743 | pm_ctx->mem.aperture); |
1744 | if (!pm_ctx->mem.gpu_va) { | 1744 | if (!pm_ctx->mem.gpu_va) { |
@@ -2633,7 +2633,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g, | |||
2633 | } | 2633 | } |
2634 | 2634 | ||
2635 | gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, | 2635 | gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, |
2636 | NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, | 2636 | NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, |
2637 | gk20a_mem_flag_none, true, mem->aperture); | 2637 | gk20a_mem_flag_none, true, mem->aperture); |
2638 | if (!gpu_va) | 2638 | if (!gpu_va) |
2639 | goto clean_up; | 2639 | goto clean_up; |
@@ -2651,7 +2651,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g, | |||
2651 | } | 2651 | } |
2652 | 2652 | ||
2653 | gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, | 2653 | gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, |
2654 | NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, | 2654 | NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, |
2655 | gk20a_mem_flag_none, false, mem->aperture); | 2655 | gk20a_mem_flag_none, false, mem->aperture); |
2656 | if (!gpu_va) | 2656 | if (!gpu_va) |
2657 | goto clean_up; | 2657 | goto clean_up; |
@@ -2669,7 +2669,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g, | |||
2669 | } | 2669 | } |
2670 | 2670 | ||
2671 | gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, | 2671 | gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size, |
2672 | NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, | 2672 | NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, |
2673 | gk20a_mem_flag_none, true, mem->aperture); | 2673 | gk20a_mem_flag_none, true, mem->aperture); |
2674 | if (!gpu_va) | 2674 | if (!gpu_va) |
2675 | goto clean_up; | 2675 | goto clean_up; |
@@ -2736,7 +2736,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g, | |||
2736 | gr_ctx->mem.gpu_va = nvgpu_gmmu_map(vm, | 2736 | gr_ctx->mem.gpu_va = nvgpu_gmmu_map(vm, |
2737 | &gr_ctx->mem, | 2737 | &gr_ctx->mem, |
2738 | gr_ctx->mem.size, | 2738 | gr_ctx->mem.size, |
2739 | NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE, | 2739 | 0, /* not GPU-cacheable */ |
2740 | gk20a_mem_flag_none, true, | 2740 | gk20a_mem_flag_none, true, |
2741 | gr_ctx->mem.aperture); | 2741 | gr_ctx->mem.aperture); |
2742 | if (!gr_ctx->mem.gpu_va) | 2742 | if (!gr_ctx->mem.gpu_va) |
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c index f1180750..66d48e6a 100644 --- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c +++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c | |||
@@ -913,7 +913,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size, | |||
913 | mem->gpu_va = nvgpu_gmmu_map(vm, | 913 | mem->gpu_va = nvgpu_gmmu_map(vm, |
914 | mem, | 914 | mem, |
915 | mem->aligned_size, | 915 | mem->aligned_size, |
916 | NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE, | 916 | NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, |
917 | gk20a_mem_flag_none, | 917 | gk20a_mem_flag_none, |
918 | false, | 918 | false, |
919 | mem->aperture); | 919 | mem->aperture); |
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c index f063961f..e4437ed2 100644 --- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c +++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c | |||
@@ -169,8 +169,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm, | |||
169 | p->pgsz_idx = pgsz_idx; | 169 | p->pgsz_idx = pgsz_idx; |
170 | p->iova = 0; | 170 | p->iova = 0; |
171 | p->kind = kind_v; | 171 | p->kind = kind_v; |
172 | p->cacheable = | 172 | p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0; |
173 | (flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE) ? 1 : 0; | ||
174 | p->prot = prot; | 173 | p->prot = prot; |
175 | p->ctag_offset = ctag_offset; | 174 | p->ctag_offset = ctag_offset; |
176 | p->clear_ctags = clear_ctags; | 175 | p->clear_ctags = clear_ctags; |
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c index c4256afb..498a1528 100644 --- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c | |||
@@ -153,8 +153,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm, | |||
153 | p->pgsz_idx = pgsz_idx; | 153 | p->pgsz_idx = pgsz_idx; |
154 | p->iova = mapping ? 1 : 0; | 154 | p->iova = mapping ? 1 : 0; |
155 | p->kind = kind_v; | 155 | p->kind = kind_v; |
156 | p->cacheable = | 156 | p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0; |
157 | (flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE) ? 1 : 0; | ||
158 | p->prot = prot; | 157 | p->prot = prot; |
159 | p->ctag_offset = ctag_offset; | 158 | p->ctag_offset = ctag_offset; |
160 | p->clear_ctags = clear_ctags; | 159 | p->clear_ctags = clear_ctags; |