author		Sami Kiminki <skiminki@nvidia.com>	2017-11-02 06:34:57 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-11-08 12:09:08 -0500
commit		c22a5af9137394524f76e1f54b4e48fe92714fec (patch)
tree		9a8074d57e5e3b67b86cc80c8b3638dbecbce061
parent		02d281d0776e2b8305b18823343a2ee972b72657 (diff)
gpu: nvgpu: Remove support for legacy mapping
Make NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL mandatory for all map
IOCTLs. We'll clean up the legacy kernel code in subsequent patches.

Remove support for NVGPU_AS_IOCTL_MAP_BUFFER. It has been superseded by
NVGPU_AS_IOCTL_MAP_BUFFER_EX.

Remove the legacy definitions of nvgpu_map_buffer_args and the related
flags, and update the in-kernel map calls accordingly by switching to
the newer definitions.

Bug 1902982

Change-Id: Ie9a7f02b8d5d0ec7c3722c4481afab6d39b4fbd0
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1560932
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
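For illustration, a minimal userspace sketch of the only mapping path
that remains valid after this change. It assumes "as_fd" is an open
nvgpu address-space fd and "buf_fd" a dmabuf fd; the incompressible
kind value 0x00 is only a placeholder, since kind numbers are
chip-specific.

	/*
	 * Hedged sketch: map a dmabuf with the now-mandatory direct
	 * kind control. "as_fd", "buf_fd" and the 0x00 kind value are
	 * assumptions, not part of this patch.
	 */
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/nvgpu.h>

	static int map_dmabuf(int as_fd, int buf_fd)
	{
		struct nvgpu_as_map_buffer_ex_args args;

		memset(&args, 0, sizeof(args));
		args.flags = NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL |
			     NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE;
		args.dmabuf_fd = buf_fd;
		args.page_size = 0;	/* 0 := best fit to buffer */

		/*
		 * No comptag-backed kind requested here, so the kernel
		 * goes straight to incompr_kind. Leaving both kinds at
		 * NV_KIND_INVALID, or clearing DIRECT_KIND_CTRL, now
		 * fails with -EINVAL.
		 */
		args.compr_kind = NV_KIND_INVALID;
		args.incompr_kind = 0x00;	/* placeholder kind */

		return ioctl(as_fd, NVGPU_AS_IOCTL_MAP_BUFFER_EX, &args);
	}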
-rw-r--r--	drivers/gpu/nvgpu/common/linux/cde.c		 2
-rw-r--r--	drivers/gpu/nvgpu/common/linux/ioctl_as.c	27
-rw-r--r--	drivers/gpu/nvgpu/common/mm/gmmu.c		 2
-rw-r--r--	drivers/gpu/nvgpu/gk20a/gr_gk20a.c		10
-rw-r--r--	drivers/gpu/nvgpu/gp10b/gr_gp10b.c		 2
-rw-r--r--	drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c	 3
-rw-r--r--	drivers/gpu/nvgpu/vgpu/mm_vgpu.c		 3
-rw-r--r--	include/uapi/linux/nvgpu.h			85
8 files changed, 37 insertions(+), 97 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c
index 775f9657..5063ba88 100644
--- a/drivers/gpu/nvgpu/common/linux/cde.c
+++ b/drivers/gpu/nvgpu/common/linux/cde.c
@@ -1279,7 +1279,7 @@ static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx)
 	/* map backing store to gpu virtual space */
 	vaddr = nvgpu_gmmu_map(ch->vm, &gr->compbit_store.mem,
 			       g->gr.compbit_store.mem.size,
-			       NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			       NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 			       gk20a_mem_flag_read_only,
 			       false,
 			       gr->compbit_store.mem.aperture);
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
index e566bfb4..8a5318e4 100644
--- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c
@@ -88,8 +88,8 @@ static int gk20a_as_ioctl_map_buffer_ex(
 		compressible_kind = args->compr_kind;
 		incompressible_kind = args->incompr_kind;
 	} else {
-		compressible_kind = args->kind;
-		incompressible_kind = NV_KIND_INVALID;
+		/* unsupported, direct kind control must be used */
+		return -EINVAL;
 	}
 
 	return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd,
@@ -100,19 +100,6 @@ static int gk20a_as_ioctl_map_buffer_ex(
 			NULL);
 }
 
-static int gk20a_as_ioctl_map_buffer(
-		struct gk20a_as_share *as_share,
-		struct nvgpu_as_map_buffer_args *args)
-{
-	gk20a_dbg_fn("");
-	return nvgpu_vm_map_buffer(as_share->vm, args->dmabuf_fd,
-				   &args->o_a.offset,
-				   args->flags, NV_KIND_DEFAULT,
-				   NV_KIND_DEFAULT,
-				   0, 0, NULL);
-	/* args->o_a.offset will be set if !err */
-}
-
 static int gk20a_as_ioctl_unmap_buffer(
 		struct gk20a_as_share *as_share,
 		struct nvgpu_as_unmap_buffer_args *args)
@@ -187,8 +174,9 @@ static int gk20a_as_ioctl_map_buffer_batch(
 			compressible_kind = map_args.compr_kind;
 			incompressible_kind = map_args.incompr_kind;
 		} else {
-			compressible_kind = map_args.kind;
-			incompressible_kind = NV_KIND_INVALID;
+			/* direct kind control must be used */
+			err = -EINVAL;
+			break;
 		}
 
 		err = nvgpu_vm_map_buffer(
@@ -348,11 +336,6 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		err = gk20a_as_ioctl_free_space(as_share,
 			(struct nvgpu_as_free_space_args *)buf);
 		break;
-	case NVGPU_AS_IOCTL_MAP_BUFFER:
-		trace_gk20a_as_ioctl_map_buffer(g->name);
-		err = gk20a_as_ioctl_map_buffer(as_share,
-			(struct nvgpu_as_map_buffer_args *)buf);
-		break;
 	case NVGPU_AS_IOCTL_MAP_BUFFER_EX:
 		trace_gk20a_as_ioctl_map_buffer(g->name);
 		err = gk20a_as_ioctl_map_buffer_ex(as_share,
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c
index d6aaf8cd..875bcc4e 100644
--- a/drivers/gpu/nvgpu/common/mm/gmmu.c
+++ b/drivers/gpu/nvgpu/common/mm/gmmu.c
@@ -680,7 +680,7 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm,
 		.pgsz = pgsz_idx,
 		.kind_v = kind_v,
 		.ctag = (u64)ctag_offset * (u64)ctag_granularity,
-		.cacheable = flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+		.cacheable = flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 		.rw_flag = rw_flag,
 		.sparse = sparse,
 		.priv = priv,
diff --git a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
index 3875ec5c..d4d6cd2d 100644
--- a/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/gr_gk20a.c
@@ -1738,7 +1738,7 @@ int gr_gk20a_update_hwpm_ctxsw_mode(struct gk20a *g,
 		pm_ctx->mem.gpu_va = nvgpu_gmmu_map(c->vm,
 					&pm_ctx->mem,
 					pm_ctx->mem.size,
-					NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+					NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 					gk20a_mem_flag_none, true,
 					pm_ctx->mem.aperture);
 		if (!pm_ctx->mem.gpu_va) {
@@ -2633,7 +2633,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	}
 
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 			gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2651,7 +2651,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	}
 
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 			gk20a_mem_flag_none, false, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2669,7 +2669,7 @@ static int gr_gk20a_map_global_ctx_buffers(struct gk20a *g,
 	}
 
 	gpu_va = nvgpu_gmmu_map(ch_vm, mem, mem->size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 			gk20a_mem_flag_none, true, mem->aperture);
 	if (!gpu_va)
 		goto clean_up;
@@ -2736,7 +2736,7 @@ int gr_gk20a_alloc_gr_ctx(struct gk20a *g,
 	gr_ctx->mem.gpu_va = nvgpu_gmmu_map(vm,
 				&gr_ctx->mem,
 				gr_ctx->mem.size,
-				NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE,
+				0, /* not GPU-cacheable */
 				gk20a_mem_flag_none, true,
 				gr_ctx->mem.aperture);
 	if (!gr_ctx->mem.gpu_va)
diff --git a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
index f1180750..66d48e6a 100644
--- a/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
+++ b/drivers/gpu/nvgpu/gp10b/gr_gp10b.c
@@ -913,7 +913,7 @@ int gr_gp10b_alloc_buffer(struct vm_gk20a *vm, size_t size,
 	mem->gpu_va = nvgpu_gmmu_map(vm,
 			mem,
 			mem->aligned_size,
-			NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE,
+			NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE,
 			gk20a_mem_flag_none,
 			false,
 			mem->aperture);
diff --git a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
index f063961f..e4437ed2 100644
--- a/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
+++ b/drivers/gpu/nvgpu/vgpu/gp10b/vgpu_mm_gp10b.c
@@ -169,8 +169,7 @@ u64 vgpu_gp10b_locked_gmmu_map(struct vm_gk20a *vm,
 	p->pgsz_idx = pgsz_idx;
 	p->iova = 0;
 	p->kind = kind_v;
-	p->cacheable =
-		(flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE) ? 1 : 0;
+	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
 	p->prot = prot;
 	p->ctag_offset = ctag_offset;
 	p->clear_ctags = clear_ctags;
diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
index c4256afb..498a1528 100644
--- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
+++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c
@@ -153,8 +153,7 @@ u64 vgpu_locked_gmmu_map(struct vm_gk20a *vm,
 	p->pgsz_idx = pgsz_idx;
 	p->iova = mapping ? 1 : 0;
 	p->kind = kind_v;
-	p->cacheable =
-		(flags & NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE) ? 1 : 0;
+	p->cacheable = (flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE) ? 1 : 0;
 	p->prot = prot;
 	p->ctag_offset = ctag_offset;
 	p->clear_ctags = clear_ctags;
diff --git a/include/uapi/linux/nvgpu.h b/include/uapi/linux/nvgpu.h
index b8e1e71c..9d1a5bdb 100644
--- a/include/uapi/linux/nvgpu.h
+++ b/include/uapi/linux/nvgpu.h
@@ -1461,27 +1461,6 @@ struct nvgpu_submit_gpfifo_args {
 	struct nvgpu_fence fence;
 };
 
-struct nvgpu_map_buffer_args {
-	__u32 flags;
-#define NVGPU_MAP_BUFFER_FLAGS_ALIGN		0x0
-#define NVGPU_MAP_BUFFER_FLAGS_OFFSET		(1 << 0)
-#define NVGPU_MAP_BUFFER_FLAGS_KIND_PITCH	0x0
-#define NVGPU_MAP_BUFFER_FLAGS_KIND_SPECIFIED	(1 << 1)
-#define NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_FALSE	0x0
-#define NVGPU_MAP_BUFFER_FLAGS_CACHEABLE_TRUE	(1 << 2)
-	__u32 nvmap_handle;
-	union {
-		__u64 offset;	/* valid if _offset flag given (in|out) */
-		__u64 align;	/* alignment multiple (0:={1 or n/a}) */
-	} offset_alignment;
-	__u32 kind;
-#define NVGPU_MAP_BUFFER_KIND_GENERIC_16BX2	0xfe
-};
-
-struct nvgpu_unmap_buffer_args {
-	__u64 offset;
-};
-
 struct nvgpu_wait_args {
 #define NVGPU_WAIT_TYPE_NOTIFIER	0x0
 #define NVGPU_WAIT_TYPE_SEMAPHORE	0x1
@@ -1789,22 +1768,12 @@ struct nvgpu_as_bind_channel_args {
  * chosen will be returned back to the caller in the 'page_size' parameter in
  * that case.
  */
-struct nvgpu_as_map_buffer_args {
-	__u32 flags;		/* in/out */
 #define NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET	    (1 << 0)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE	    (1 << 2)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_IO_COHERENT	    (1 << 4)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_UNMAPPED_PTE	    (1 << 5)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS (1 << 6)
 #define NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL  (1 << 8)
-	__u32 reserved;		/* in */
-	__u32 dmabuf_fd;	/* in */
-	__u32 page_size;	/* inout, 0:= best fit to buffer */
-	union {
-		__u64 offset;	/* inout, byte address valid iff _FIXED_OFFSET */
-		__u64 align;	/* in, alignment multiple (0:={1 or n/a}) */
-	} o_a;
-};
 
 /*
  * Mapping dmabuf fds into an address space:
@@ -1816,39 +1785,29 @@ struct nvgpu_as_map_buffer_args {
  * returned back to the caller in the 'page_size' parameter in that case.
  */
 struct nvgpu_as_map_buffer_ex_args {
+	/* NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL must be set */
 	__u32 flags;		/* in/out */
-#define NV_KIND_DEFAULT -1
-	union {
-		/*
-		 * Used if NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL
-		 * is not set.
-		 */
-		__s32 kind;	/* in (-1 represents default) */
 
-		/*
-		 * If NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is
-		 * set, this is used, instead. The rules are:
-		 *
-		 * - If both compr_kind and incompr_kind are set
-		 *   (i.e., value is other than NV_KIND_INVALID),
-		 *   kernel attempts to use compr_kind first.
-		 *
-		 * - If compr_kind is set, kernel attempts to allocate
-		 *   comptags for the buffer. If successful,
-		 *   compr_kind is used as the PTE kind.
-		 *
-		 * - If incompr_kind is set, kernel uses incompr_kind
-		 *   as the PTE kind. Comptags are not allocated.
-		 *
-		 * - If neither compr_kind or incompr_kind is set, the
-		 *   map call will fail.
-		 */
+	/*
+	 * - If both compr_kind and incompr_kind are set
+	 *   (i.e., value is other than NV_KIND_INVALID),
+	 *   kernel attempts to use compr_kind first.
+	 *
+	 * - If compr_kind is set, kernel attempts to allocate
+	 *   comptags for the buffer. If successful,
+	 *   compr_kind is used as the PTE kind.
+	 *
+	 * - If incompr_kind is set, kernel uses incompr_kind as the
+	 *   PTE kind, if compr_kind cannot be used. Comptags are not
+	 *   allocated.
+	 *
+	 * - If neither compr_kind or incompr_kind is set, the
+	 *   map call will fail.
+	 */
 #define NV_KIND_INVALID -1
-	struct {
-		__s16 compr_kind;
-		__s16 incompr_kind;
-	};
-	};
+	__s16 compr_kind;
+	__s16 incompr_kind;
+
 	__u32 dmabuf_fd;	/* in */
 	__u32 page_size;	/* inout, 0:= best fit to buffer */
 
@@ -1975,7 +1934,7 @@ struct nvgpu_as_get_va_regions_args {
 };
 
 struct nvgpu_as_map_buffer_batch_args {
-	__u64 unmaps; /* ptr to array of nvgpu_unmap_buffer_args */
+	__u64 unmaps; /* ptr to array of nvgpu_as_unmap_buffer_args */
 	__u64 maps;   /* ptr to array of nvgpu_as_map_buffer_ex_args */
 	__u32 num_unmaps; /* in: number of unmaps
 			   * out: on error, number of successful unmaps */
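
The batch path is affected the same way: each entry in the maps array
is an nvgpu_as_map_buffer_ex_args and must set DIRECT_KIND_CTRL, or the
handler above stops with -EINVAL. A hedged sketch follows; the ioctl
name NVGPU_AS_IOCTL_MAP_BUFFER_BATCH, a "num_maps" field mirroring
"num_unmaps", and the names "as_fd", "map_list" and "n_maps" are
assumptions for illustration, not taken from this patch.

	/* Illustrative batch map, no unmaps in this pass. */
	struct nvgpu_as_map_buffer_batch_args batch;

	memset(&batch, 0, sizeof(batch));
	batch.unmaps = 0;
	batch.num_unmaps = 0;
	batch.maps = (__u64)(uintptr_t)map_list; /* nvgpu_as_map_buffer_ex_args[] */
	batch.num_maps = n_maps;	/* every entry sets DIRECT_KIND_CTRL */

	if (ioctl(as_fd, NVGPU_AS_IOCTL_MAP_BUFFER_BATCH, &batch) != 0)
		/* on error, num_unmaps reports how many unmaps succeeded */
		perror("NVGPU_AS_IOCTL_MAP_BUFFER_BATCH");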