path: root/drivers/gpu/nvgpu/common/mm/vm.c
author	Alex Waterman <alexw@nvidia.com>	2017-11-16 15:56:53 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-11-17 19:17:20 -0500
commit	35ae4194a05d47aa6d79353428f81f2ca47ce90f (patch)
tree	73c7f15348e1f5deb411392f41e339572b797bb4 /drivers/gpu/nvgpu/common/mm/vm.c
parent	b42fb7ba26b565f93118fbdd9e17b42ee6144c5e (diff)
gpu: nvgpu: Add translation for NVGPU MM flags
Add a translation layer to convert from the NVGPU_AS_* flags to the new
set of NVGPU_VM_MAP_* and NVGPU_VM_AREA_ALLOC_* flags. This allows the
common MM code to avoid depending on the UAPI header defined for Linux.

In addition to this change, a couple of other small changes were made:

1. Deprecate, print a warning for, and ignore usage of the
   NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS flag.
2. Move the t19x IO coherence flag from the t19x UAPI header to the
   regular UAPI header.

JIRA NVGPU-293

Change-Id: I146402b0e8617294374e63e78f8826c57cd3b291
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1599802
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
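For context, a minimal sketch of what such a translation layer might look like follows. This is an illustration based only on the commit message: the helper name nvgpu_vm_translate_as_flags and the exact set of flags handled are assumptions, not the code actually merged in this change.

/*
 * Hypothetical sketch: convert Linux UAPI NVGPU_AS_* map flags into the
 * common NVGPU_VM_MAP_* flags so common MM code never includes the UAPI
 * header. Helper name and flag coverage are assumptions, per the lead-in.
 */
static u32 nvgpu_vm_translate_as_flags(struct gk20a *g, u32 as_flags)
{
        u32 vm_flags = 0;

        /* Each Linux UAPI flag maps to a common-MM equivalent. */
        if (as_flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)
                vm_flags |= NVGPU_VM_MAP_FIXED_OFFSET;
        if (as_flags & NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE)
                vm_flags |= NVGPU_VM_MAP_CACHEABLE;

        /*
         * Deprecated flag: warn and otherwise ignore it, as described in
         * point 1 of the commit message.
         */
        if (as_flags & NVGPU_AS_MAP_BUFFER_FLAGS_MAPPABLE_COMPBITS)
                nvgpu_warn(g, "MAPPABLE_COMPBITS is deprecated; ignoring");

        return vm_flags;
}

With a helper along these lines, the Linux IOCTL layer translates flags once at the UAPI boundary and passes only NVGPU_VM_MAP_* values into common code such as nvgpu_vm_map(), which is why the diff below can drop the uapi/linux/nvgpu.h include entirely.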
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/vm.c	|	27
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index ebe8e381..637632e0 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -21,8 +21,6 @@
  */
 
 #include <nvgpu/bug.h>
-#include <uapi/linux/nvgpu.h>
-
 #include <nvgpu/log.h>
 #include <nvgpu/dma.h>
 #include <nvgpu/vm.h>
@@ -765,7 +763,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 	u8 pte_kind;
 
 	if (vm->userspace_managed &&
-	    !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
+	    !(flags & NVGPU_VM_MAP_FIXED_OFFSET)) {
 		nvgpu_err(g,
 			  "non-fixed-offset mapping not available on "
 			  "userspace managed address spaces");
@@ -774,11 +772,12 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 
 	binfo.flags = flags;
 	binfo.size = nvgpu_os_buf_get_size(os_buf);
-	binfo.compr_kind = (vm->enable_ctag && compr_kind != NV_KIND_INVALID ?
-			    compr_kind : NV_KIND_INVALID);
+	binfo.compr_kind =
+		(vm->enable_ctag && compr_kind != NVGPU_KIND_INVALID ?
+		 compr_kind : NVGPU_KIND_INVALID);
 	binfo.incompr_kind = incompr_kind;
 
-	if (compr_kind != NV_KIND_INVALID)
+	if (compr_kind != NVGPU_KIND_INVALID)
 		map_key_kind = compr_kind;
 	else
 		map_key_kind = incompr_kind;
@@ -830,7 +829,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 	/*
 	 * Check if we should use a fixed offset for mapping this buffer.
 	 */
-	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+	if (flags & NVGPU_VM_MAP_FIXED_OFFSET) {
 		err = nvgpu_vm_area_validate_buffer(vm,
 						    map_addr,
 						    map_size,
@@ -848,7 +847,7 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 		goto clean_up;
 	}
 
-	if (binfo.compr_kind != NV_KIND_INVALID) {
+	if (binfo.compr_kind != NVGPU_KIND_INVALID) {
 		struct gk20a_comptags comptags = { 0 };
 
 		/*
@@ -903,14 +902,14 @@ struct nvgpu_mapped_buf *nvgpu_vm_map(struct vm_gk20a *vm,
 	/*
 	 * Figure out the kind and ctag offset for the GMMU page tables
 	 */
-	if (binfo.compr_kind != NV_KIND_INVALID && ctag_offset) {
+	if (binfo.compr_kind != NVGPU_KIND_INVALID && ctag_offset) {
 		/*
 		 * Adjust the ctag_offset as per the buffer map offset
 		 */
 		ctag_offset += phys_offset >>
 			       ilog2(g->ops.fb.compression_page_size(g));
 		pte_kind = binfo.compr_kind;
-	} else if (binfo.incompr_kind != NV_KIND_INVALID) {
+	} else if (binfo.incompr_kind != NVGPU_KIND_INVALID) {
 		/*
 		 * Incompressible kind, ctag offset will not be programmed
 		 */
@@ -1093,7 +1092,7 @@ void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset,
 	if (!mapped_buffer)
 		goto done;
 
-	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+	if (mapped_buffer->flags & NVGPU_VM_MAP_FIXED_OFFSET) {
 		if (nvgpu_vm_unmap_sync_buffer(vm, mapped_buffer))
 			/*
 			 * Looks like we have failed... Better not continue in
@@ -1118,7 +1117,7 @@ done:
 static int nvgpu_vm_compute_compression(struct vm_gk20a *vm,
 					struct nvgpu_ctag_buffer_info *binfo)
 {
-	bool kind_compressible = (binfo->compr_kind != NV_KIND_INVALID);
+	bool kind_compressible = (binfo->compr_kind != NVGPU_KIND_INVALID);
 	struct gk20a *g = gk20a_from_vm(vm);
 
 	if (kind_compressible &&
@@ -1127,7 +1126,7 @@ static int nvgpu_vm_compute_compression(struct vm_gk20a *vm,
 		/*
 		 * Let's double check that there is a fallback kind
 		 */
-		if (binfo->incompr_kind == NV_KIND_INVALID) {
+		if (binfo->incompr_kind == NVGPU_KIND_INVALID) {
 			nvgpu_err(g,
 				  "Unsupported page size for compressible "
 				  "kind, but no fallback kind");
@@ -1136,7 +1135,7 @@ static int nvgpu_vm_compute_compression(struct vm_gk20a *vm,
 		nvgpu_log(g, gpu_dbg_map,
 			  "Unsupported page size for compressible "
 			  "kind, demoting to incompressible");
-		binfo->compr_kind = NV_KIND_INVALID;
+		binfo->compr_kind = NVGPU_KIND_INVALID;
 		kind_compressible = false;
 	}
 }