diff options
author | Alex Waterman <alexw@nvidia.com> | 2017-11-16 15:56:53 -0500 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-11-17 19:17:20 -0500 |
commit | 35ae4194a05d47aa6d79353428f81f2ca47ce90f (patch) | |
tree | 73c7f15348e1f5deb411392f41e339572b797bb4 /drivers/gpu/nvgpu/common/mm | |
parent | b42fb7ba26b565f93118fbdd9e17b42ee6144c5e (diff) |
gpu: nvgpu: Add translation for NVGPU MM flags
Add a translation layer to convert from the NVGPU_AS_* flags to
the new set of NVGPU_VM_MAP_* and NVGPU_VM_AREA_ALLOC_* flags.
This allows the common MM code to not depend on the UAPI header
defined for Linux.
In addition to this change a couple of other small changes were
made:
1. Deprecate, print a warning, and ignore usage of the
NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS flag.
2. Move the t19x IO coherence flag from the t19x UAPI header
to the regular UAPI header.
JIRA NVGPU-293
Change-Id: I146402b0e8617294374e63e78f8826c57cd3b291
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1599802
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/gmmu.c | 8 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/gmmu_t19x.c | 6 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/vm.c | 27 | ||||
-rw-r--r-- | drivers/gpu/nvgpu/common/mm/vm_area.c | 6 |
4 files changed, 20 insertions, 27 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu.c b/drivers/gpu/nvgpu/common/mm/gmmu.c index 8ad7dac7..4d622412 100644 --- a/drivers/gpu/nvgpu/common/mm/gmmu.c +++ b/drivers/gpu/nvgpu/common/mm/gmmu.c | |||
@@ -20,8 +20,6 @@ | |||
20 | * DEALINGS IN THE SOFTWARE. | 20 | * DEALINGS IN THE SOFTWARE. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <uapi/linux/nvgpu.h> | ||
24 | |||
25 | #include <nvgpu/log.h> | 23 | #include <nvgpu/log.h> |
26 | #include <nvgpu/list.h> | 24 | #include <nvgpu/list.h> |
27 | #include <nvgpu/dma.h> | 25 | #include <nvgpu/dma.h> |
@@ -682,12 +680,12 @@ u64 gk20a_locked_gmmu_map(struct vm_gk20a *vm, | |||
682 | .pgsz = pgsz_idx, | 680 | .pgsz = pgsz_idx, |
683 | .kind_v = kind_v, | 681 | .kind_v = kind_v, |
684 | .ctag = (u64)ctag_offset * (u64)ctag_granularity, | 682 | .ctag = (u64)ctag_offset * (u64)ctag_granularity, |
685 | .cacheable = flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE, | 683 | .cacheable = flags & NVGPU_VM_MAP_CACHEABLE, |
686 | .rw_flag = rw_flag, | 684 | .rw_flag = rw_flag, |
687 | .sparse = sparse, | 685 | .sparse = sparse, |
688 | .priv = priv, | 686 | .priv = priv, |
689 | .coherent = flags & NVGPU_AS_MAP_BUFFER_FLAGS_IO_COHERENT, | 687 | .coherent = flags & NVGPU_VM_MAP_IO_COHERENT, |
690 | .valid = !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_UNMAPPED_PTE), | 688 | .valid = !(flags & NVGPU_VM_MAP_UNMAPPED_PTE), |
691 | .aperture = aperture | 689 | .aperture = aperture |
692 | }; | 690 | }; |
693 | 691 | ||
diff --git a/drivers/gpu/nvgpu/common/mm/gmmu_t19x.c b/drivers/gpu/nvgpu/common/mm/gmmu_t19x.c index 9f9c188d..f2386b3f 100644 --- a/drivers/gpu/nvgpu/common/mm/gmmu_t19x.c +++ b/drivers/gpu/nvgpu/common/mm/gmmu_t19x.c | |||
@@ -20,12 +20,10 @@ | |||
20 | * DEALINGS IN THE SOFTWARE. | 20 | * DEALINGS IN THE SOFTWARE. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <uapi/linux/nvgpu.h> | ||
24 | |||
25 | #include <nvgpu/gmmu.h> | 23 | #include <nvgpu/gmmu.h> |
24 | #include <nvgpu/vm.h> | ||
26 | 25 | ||
27 | void nvgpu_gmmu_add_t19x_attrs(struct nvgpu_gmmu_attrs *attrs, u32 flags) | 26 | void nvgpu_gmmu_add_t19x_attrs(struct nvgpu_gmmu_attrs *attrs, u32 flags) |
28 | { | 27 | { |
29 | attrs->t19x_attrs.l3_alloc = (bool)(flags & | 28 | attrs->t19x_attrs.l3_alloc = (bool)(flags & NVGPU_VM_MAP_L3_ALLOC); |
30 | NVGPU_AS_MAP_BUFFER_FLAGS_L3_ALLOC); | ||
31 | } | 29 | } |
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c index ebe8e381..637632e0 100644 --- a/drivers/gpu/nvgpu/common/mm/vm.c +++ b/drivers/gpu/nvgpu/common/mm/vm.c | |||
@@ -21,8 +21,6 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <nvgpu/bug.h> | 23 | #include <nvgpu/bug.h> |
24 | #include <uapi/linux/nvgpu.h> | ||
25 | |||
26 | #include <nvgpu/log.h> | 24 | #include <nvgpu/log.h> |
27 | #include <nvgpu/dma.h> | 25 | #include <nvgpu/dma.h> |
28 | #include <nvgpu/vm.h> | 26 | #include <nvgpu/vm.h> |
@@ -765,7 +763,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
765 | u8 pte_kind; | 763 | u8 pte_kind; |
766 | 764 | ||
767 | if (vm->userspace_managed && | 765 | if (vm->userspace_managed && |
768 | !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) { | 766 | !(flags & NVGPU_VM_MAP_FIXED_OFFSET)) { |
769 | nvgpu_err(g, | 767 | nvgpu_err(g, |
770 | "non-fixed-offset mapping not available on " | 768 | "non-fixed-offset mapping not available on " |
771 | "userspace managed address spaces"); | 769 | "userspace managed address spaces"); |
@@ -774,11 +772,12 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
774 | 772 | ||
775 | binfo.flags = flags; | 773 | binfo.flags = flags; |
776 | binfo.size = nvgpu_os_buf_get_size(os_buf); | 774 | binfo.size = nvgpu_os_buf_get_size(os_buf); |
777 | binfo.compr_kind = (vm->enable_ctag && compr_kind != NV_KIND_INVALID ? | 775 | binfo.compr_kind = |
778 | compr_kind : NV_KIND_INVALID); | 776 | (vm->enable_ctag && compr_kind != NVGPU_KIND_INVALID ? |
777 | compr_kind : NVGPU_KIND_INVALID); | ||
779 | binfo.incompr_kind = incompr_kind; | 778 | binfo.incompr_kind = incompr_kind; |
780 | 779 | ||
781 | if (compr_kind != NV_KIND_INVALID) | 780 | if (compr_kind != NVGPU_KIND_INVALID) |
782 | map_key_kind = compr_kind; | 781 | map_key_kind = compr_kind; |
783 | else | 782 | else |
784 | map_key_kind = incompr_kind; | 783 | map_key_kind = incompr_kind; |
@@ -830,7 +829,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
830 | /* | 829 | /* |
831 | * Check if we should use a fixed offset for mapping this buffer. | 830 | * Check if we should use a fixed offset for mapping this buffer. |
832 | */ | 831 | */ |
833 | if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { | 832 | if (flags & NVGPU_VM_MAP_FIXED_OFFSET) { |
834 | err = nvgpu_vm_area_validate_buffer(vm, | 833 | err = nvgpu_vm_area_validate_buffer(vm, |
835 | map_addr, | 834 | map_addr, |
836 | map_size, | 835 | map_size, |
@@ -848,7 +847,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
848 | goto clean_up; | 847 | goto clean_up; |
849 | } | 848 | } |
850 | 849 | ||
851 | if (binfo.compr_kind != NV_KIND_INVALID) { | 850 | if (binfo.compr_kind != NVGPU_KIND_INVALID) { |
852 | struct gk20a_comptags comptags = { 0 }; | 851 | struct gk20a_comptags comptags = { 0 }; |
853 | 852 | ||
854 | /* | 853 | /* |
@@ -903,14 +902,14 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm, | |||
903 | /* | 902 | /* |
904 | * Figure out the kind and ctag offset for the GMMU page tables | 903 | * Figure out the kind and ctag offset for the GMMU page tables |
905 | */ | 904 | */ |
906 | if (binfo.compr_kind != NV_KIND_INVALID && ctag_offset) { | 905 | if (binfo.compr_kind != NVGPU_KIND_INVALID && ctag_offset) { |
907 | /* | 906 | /* |
908 | * Adjust the ctag_offset as per the buffer map offset | 907 | * Adjust the ctag_offset as per the buffer map offset |
909 | */ | 908 | */ |
910 | ctag_offset += phys_offset >> | 909 | ctag_offset += phys_offset >> |
911 | ilog2(g->ops.fb.compression_page_size(g)); | 910 | ilog2(g->ops.fb.compression_page_size(g)); |
912 | pte_kind = binfo.compr_kind; | 911 | pte_kind = binfo.compr_kind; |
913 | } else if (binfo.incompr_kind != NV_KIND_INVALID) { | 912 | } else if (binfo.incompr_kind != NVGPU_KIND_INVALID) { |
914 | /* | 913 | /* |
915 | * Incompressible kind, ctag offset will not be programmed | 914 | * Incompressible kind, ctag offset will not be programmed |
916 | */ | 915 | */ |
@@ -1093,7 +1092,7 @@ void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset, | |||
1093 | if (!mapped_buffer) | 1092 | if (!mapped_buffer) |
1094 | goto done; | 1093 | goto done; |
1095 | 1094 | ||
1096 | if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { | 1095 | if (mapped_buffer->flags & NVGPU_VM_MAP_FIXED_OFFSET) { |
1097 | if (nvgpu_vm_unmap_sync_buffer(vm, mapped_buffer)) | 1096 | if (nvgpu_vm_unmap_sync_buffer(vm, mapped_buffer)) |
1098 | /* | 1097 | /* |
1099 | * Looks like we have failed... Better not continue in | 1098 | * Looks like we have failed... Better not continue in |
@@ -1118,7 +1117,7 @@ done: | |||
1118 | static int nvgpu_vm_compute_compression(struct vm_gk20a *vm, | 1117 | static int nvgpu_vm_compute_compression(struct vm_gk20a *vm, |
1119 | struct nvgpu_ctag_buffer_info *binfo) | 1118 | struct nvgpu_ctag_buffer_info *binfo) |
1120 | { | 1119 | { |
1121 | bool kind_compressible = (binfo->compr_kind != NV_KIND_INVALID); | 1120 | bool kind_compressible = (binfo->compr_kind != NVGPU_KIND_INVALID); |
1122 | struct gk20a *g = gk20a_from_vm(vm); | 1121 | struct gk20a *g = gk20a_from_vm(vm); |
1123 | 1122 | ||
1124 | if (kind_compressible && | 1123 | if (kind_compressible && |
@@ -1127,7 +1126,7 @@ static int nvgpu_vm_compute_compression(struct vm_gk20a *vm, | |||
1127 | /* | 1126 | /* |
1128 | * Let's double check that there is a fallback kind | 1127 | * Let's double check that there is a fallback kind |
1129 | */ | 1128 | */ |
1130 | if (binfo->incompr_kind == NV_KIND_INVALID) { | 1129 | if (binfo->incompr_kind == NVGPU_KIND_INVALID) { |
1131 | nvgpu_err(g, | 1130 | nvgpu_err(g, |
1132 | "Unsupported page size for compressible " | 1131 | "Unsupported page size for compressible " |
1133 | "kind, but no fallback kind"); | 1132 | "kind, but no fallback kind"); |
@@ -1136,7 +1135,7 @@ static int nvgpu_vm_compute_compression(struct vm_gk20a *vm, | |||
1136 | nvgpu_log(g, gpu_dbg_map, | 1135 | nvgpu_log(g, gpu_dbg_map, |
1137 | "Unsupported page size for compressible " | 1136 | "Unsupported page size for compressible " |
1138 | "kind, demoting to incompressible"); | 1137 | "kind, demoting to incompressible"); |
1139 | binfo->compr_kind = NV_KIND_INVALID; | 1138 | binfo->compr_kind = NVGPU_KIND_INVALID; |
1140 | kind_compressible = false; | 1139 | kind_compressible = false; |
1141 | } | 1140 | } |
1142 | } | 1141 | } |
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c index b6286c43..5ed2626f 100644 --- a/drivers/gpu/nvgpu/common/mm/vm_area.c +++ b/drivers/gpu/nvgpu/common/mm/vm_area.c | |||
@@ -20,8 +20,6 @@ | |||
20 | * DEALINGS IN THE SOFTWARE. | 20 | * DEALINGS IN THE SOFTWARE. |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <uapi/linux/nvgpu.h> | ||
24 | |||
25 | #include <nvgpu/vm.h> | 23 | #include <nvgpu/vm.h> |
26 | #include <nvgpu/vm_area.h> | 24 | #include <nvgpu/vm_area.h> |
27 | 25 | ||
@@ -121,7 +119,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, | |||
121 | goto clean_up_err; | 119 | goto clean_up_err; |
122 | 120 | ||
123 | vma = vm->vma[pgsz_idx]; | 121 | vma = vm->vma[pgsz_idx]; |
124 | if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET) | 122 | if (flags & NVGPU_VM_AREA_ALLOC_FIXED_OFFSET) |
125 | vaddr_start = nvgpu_alloc_fixed(vma, *addr, | 123 | vaddr_start = nvgpu_alloc_fixed(vma, *addr, |
126 | (u64)pages * | 124 | (u64)pages * |
127 | (u64)page_size, | 125 | (u64)page_size, |
@@ -143,7 +141,7 @@ int nvgpu_vm_area_alloc(struct vm_gk20a *vm, u32 pages, u32 page_size, | |||
143 | 141 | ||
144 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); | 142 | nvgpu_mutex_acquire(&vm->update_gmmu_lock); |
145 | 143 | ||
146 | if (flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE) { | 144 | if (flags & NVGPU_VM_AREA_ALLOC_SPARSE) { |
147 | u64 map_addr = g->ops.mm.gmmu_map(vm, vaddr_start, | 145 | u64 map_addr = g->ops.mm.gmmu_map(vm, vaddr_start, |
148 | NULL, | 146 | NULL, |
149 | 0, | 147 | 0, |