author	Sami Kiminki <skiminki@nvidia.com>	2017-11-02 16:03:15 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-11-10 11:38:19 -0500
commit	cefabe7eb1f1f1dba9692e21ab4f1b88b9163489 (patch)
tree	b0f4e6ce64cd408e780925b8b3365b45805b7d95 /drivers/gpu/nvgpu/common/mm/vm.c
parent	b584bf8aa814d026498ebcee23480d1963338e47 (diff)
gpu: nvgpu: Remove PTE kind logic
Since NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL was made mandatory, the kernel no longer needs to know the details of the PTE kinds. Thus, we can remove the kind_gk20a.h header and the kind table setup code, and simplify the buffer mapping code a bit.

Bug 1902982

Change-Id: Iaf798023c219a64fb0a84da09431c5ce4bc046eb
Signed-off-by: Sami Kiminki <skiminki@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1560933
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
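For context: with direct kind control mandatory, the removed legacy path that derived kind_v/uc_kind_v from a single kind inside the kernel goes away, and the remaining helper in this file, nvgpu_vm_compute_compression(), only checks whether the user-chosen compressible kind can actually be honoured for the buffer's page size. The sketch below shows how a mapping path is assumed to hand the user-supplied kinds to the new helper; the wrapper function name and its caller wiring are hypothetical (the real caller updates live outside vm.c), and the snippet assumes the in-tree nvgpu headers for struct vm_gk20a and struct nvgpu_ctag_buffer_info.

/*
 * Illustrative sketch only; not part of this patch. With
 * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL mandatory, the mapping path
 * passes the user-chosen kinds straight through and lets
 * nvgpu_vm_compute_compression() decide whether comptags are usable.
 * The wrapper name below is hypothetical.
 */
static int example_fill_ctag_info(struct vm_gk20a *vm,
				  struct nvgpu_ctag_buffer_info *binfo,
				  s16 compr_kind, s16 incompr_kind)
{
	/* Userspace selects both kinds; NV_KIND_INVALID disables one. */
	binfo->compr_kind = compr_kind;
	binfo->incompr_kind = incompr_kind;

	/*
	 * Assumes binfo->size and binfo->pgsz_idx were already filled by the
	 * mapping path. The helper checks the page size against
	 * g->ops.fb.compressible_page_size(), demotes to the incompressible
	 * kind (or errors out if there is no fallback), and sizes
	 * binfo->ctag_lines.
	 */
	return nvgpu_vm_compute_compression(vm, binfo);
}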
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/vm.c	124
1 file changed, 18 insertions(+), 106 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 64c9c217..46783e4e 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -35,7 +35,6 @@
 
 #include "gk20a/gk20a.h"
 #include "gk20a/mm_gk20a.h"
-#include "gk20a/kind_gk20a.h"
 
 static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer,
 			     struct vm_gk20a_mapping_batch *batch);
@@ -829,123 +828,36 @@ done:
 	return;
 }
 
-int nvgpu_vm_init_kind_info(struct nvgpu_ctag_buffer_info *binfo,
-			    s16 compr_kind, s16 incompr_kind)
-{
-	if (binfo->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
-		/* were we supplied with a kind in either parameter? */
-		if ((compr_kind < 0 || compr_kind >= NV_KIND_ATTR_SIZE) &&
-		    (incompr_kind < 0 || incompr_kind >= NV_KIND_ATTR_SIZE))
-			return -EINVAL;
-
-		if (compr_kind != NV_KIND_INVALID) {
-			binfo->use_kind_v = true;
-			binfo->kind_v = (u8)compr_kind;
-		}
-
-		if (incompr_kind != NV_KIND_INVALID) {
-			binfo->use_uc_kind_v = true;
-			binfo->uc_kind_v = (u8)incompr_kind;
-		}
-	} else {
-		if (compr_kind < 0 || compr_kind >= NV_KIND_ATTR_SIZE)
-			return -EINVAL;
-
-		binfo->use_kind_v = true;
-		binfo->kind_v = (u8)compr_kind;
-
-		/*
-		 * Note: nvgpu_vm_kind_and_compression() will figure out
-		 * uc_kind_v or return an error.
-		 */
-	}
-
-	return 0;
-}
-
-static int nvgpu_vm_setup_kind_legacy(struct vm_gk20a *vm,
-				      struct nvgpu_ctag_buffer_info *binfo,
-				      bool *pkind_compressible)
+int nvgpu_vm_compute_compression(struct vm_gk20a *vm,
+				 struct nvgpu_ctag_buffer_info *binfo)
 {
-	struct gk20a *g = gk20a_from_vm(vm);
-	bool kind_compressible;
-
-	if (unlikely(binfo->kind_v == g->ops.mm.get_kind_invalid()))
-		binfo->kind_v = g->ops.mm.get_kind_pitch();
-
-	if (unlikely(!gk20a_kind_is_supported(binfo->kind_v))) {
-		nvgpu_err(g, "kind 0x%x not supported", binfo->kind_v);
-		return -EINVAL;
-	}
-
-	binfo->uc_kind_v = g->ops.mm.get_kind_invalid();
-
-	/*
-	 * Find a suitable incompressible kind if it becomes necessary later.
-	 */
-	kind_compressible = gk20a_kind_is_compressible(binfo->kind_v);
-	if (kind_compressible) {
-		binfo->uc_kind_v = gk20a_get_uncompressed_kind(binfo->kind_v);
-		if (binfo->uc_kind_v == g->ops.mm.get_kind_invalid()) {
-			/*
-			 * Shouldn't happen, but it is worth cross-checking.
-			 */
-			nvgpu_err(g, "comptag kind 0x%x can't be"
-				  " downgraded to uncompressed kind",
-				  binfo->kind_v);
-			return -EINVAL;
-		}
-	}
-
-	*pkind_compressible = kind_compressible;
-
-	return 0;
-}
-
-int nvgpu_vm_compute_kind_and_compression(struct vm_gk20a *vm,
-					  struct nvgpu_ctag_buffer_info *binfo)
-{
-	bool kind_compressible;
+	bool kind_compressible = (binfo->compr_kind != NV_KIND_INVALID);
 	struct gk20a *g = gk20a_from_vm(vm);
 	int ctag_granularity = g->ops.fb.compression_page_size(g);
 
-	if (!binfo->use_kind_v)
-		binfo->kind_v = g->ops.mm.get_kind_invalid();
-	if (!binfo->use_uc_kind_v)
-		binfo->uc_kind_v = g->ops.mm.get_kind_invalid();
-
-	if (binfo->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
-		kind_compressible = (binfo->kind_v !=
-				     g->ops.mm.get_kind_invalid());
-		if (!kind_compressible)
-			binfo->kind_v = binfo->uc_kind_v;
-	} else {
-		int err = nvgpu_vm_setup_kind_legacy(vm, binfo,
-						     &kind_compressible);
-
-		if (err)
-			return err;
-	}
-
-	/* comptags only supported for suitable kinds, 128KB pagesize */
 	if (kind_compressible &&
 	    vm->gmmu_page_sizes[binfo->pgsz_idx] <
 	    g->ops.fb.compressible_page_size(g)) {
-		/* it is safe to fall back to uncompressed as
-		   functionality is not harmed */
-		binfo->kind_v = binfo->uc_kind_v;
-		kind_compressible = false;
+		/*
+		 * Let's double check that there is a fallback kind
+		 */
+		if (binfo->incompr_kind == NV_KIND_INVALID) {
+			nvgpu_err(g,
+				  "Unsupported page size for compressible "
+				  "kind, but no fallback kind");
+			return -EINVAL;
+		} else {
+			nvgpu_log(g, gpu_dbg_map,
+				  "Unsupported page size for compressible "
+				  "kind, demoting to incompressible");
+			binfo->compr_kind = NV_KIND_INVALID;
+			kind_compressible = false;
+		}
 	}
 
 	if (kind_compressible)
 		binfo->ctag_lines = DIV_ROUND_UP_ULL(binfo->size,
 						     ctag_granularity);
-	else
-		binfo->ctag_lines = 0;
-
-	binfo->use_kind_v = (binfo->kind_v != g->ops.mm.get_kind_invalid());
-	binfo->use_uc_kind_v = (binfo->uc_kind_v !=
-				g->ops.mm.get_kind_invalid());
 
 	return 0;
 }