summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/vm.c
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2017-10-17 20:12:28 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2017-11-01 22:06:30 -0400
commit88ee812d56333375f7ae44e28b483c1a161d75da (patch)
tree00b4ff8d2c0aa21721a9e8756df7dd9b1aad95ed /drivers/gpu/nvgpu/common/mm/vm.c
parenta8bd154f7907c7054f8668c5995c1b5a7f748edc (diff)
gpu: nvgpu: Remove buffer_attrs struct
Remove the buffer_attrs struct and replace it with a more streamlined nvgpu_ctag_buffer_info struct. This struct allows several different fields to all be passed by pointer to the various kind/compression functions in the VM map process. This patch also moves several comptag/kind related functions to the core vm.c code since these functions can be reused by other OSes. Change-Id: I2a0f0a1c4b554ce4c8f2acdbe3161392e717d3bf Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1583984 GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--drivers/gpu/nvgpu/common/mm/vm.c122
1 files changed, 122 insertions, 0 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 2ce62e75..64c9c217 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -35,6 +35,7 @@
35 35
36#include "gk20a/gk20a.h" 36#include "gk20a/gk20a.h"
37#include "gk20a/mm_gk20a.h" 37#include "gk20a/mm_gk20a.h"
38#include "gk20a/kind_gk20a.h"
38 39
39static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer, 40static void __nvgpu_vm_unmap(struct nvgpu_mapped_buf *mapped_buffer,
40 struct vm_gk20a_mapping_batch *batch); 41 struct vm_gk20a_mapping_batch *batch);
@@ -827,3 +828,124 @@ done:
827 nvgpu_mutex_release(&vm->update_gmmu_lock); 828 nvgpu_mutex_release(&vm->update_gmmu_lock);
828 return; 829 return;
829} 830}
831
832int nvgpu_vm_init_kind_info(struct nvgpu_ctag_buffer_info *binfo,
833 s16 compr_kind, s16 incompr_kind)
834{
835 if (binfo->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
836 /* were we supplied with a kind in either parameter? */
837 if ((compr_kind < 0 || compr_kind >= NV_KIND_ATTR_SIZE) &&
838 (incompr_kind < 0 || incompr_kind >= NV_KIND_ATTR_SIZE))
839 return -EINVAL;
840
841 if (compr_kind != NV_KIND_INVALID) {
842 binfo->use_kind_v = true;
843 binfo->kind_v = (u8)compr_kind;
844 }
845
846 if (incompr_kind != NV_KIND_INVALID) {
847 binfo->use_uc_kind_v = true;
848 binfo->uc_kind_v = (u8)incompr_kind;
849 }
850 } else {
851 if (compr_kind < 0 || compr_kind >= NV_KIND_ATTR_SIZE)
852 return -EINVAL;
853
854 binfo->use_kind_v = true;
855 binfo->kind_v = (u8)compr_kind;
856
857 /*
858 * Note: nvgpu_vm_kind_and_compression() will figure out
859 * uc_kind_v or return an error.
860 */
861 }
862
863 return 0;
864}
865
866static int nvgpu_vm_setup_kind_legacy(struct vm_gk20a *vm,
867 struct nvgpu_ctag_buffer_info *binfo,
868 bool *pkind_compressible)
869{
870 struct gk20a *g = gk20a_from_vm(vm);
871 bool kind_compressible;
872
873 if (unlikely(binfo->kind_v == g->ops.mm.get_kind_invalid()))
874 binfo->kind_v = g->ops.mm.get_kind_pitch();
875
876 if (unlikely(!gk20a_kind_is_supported(binfo->kind_v))) {
877 nvgpu_err(g, "kind 0x%x not supported", binfo->kind_v);
878 return -EINVAL;
879 }
880
881 binfo->uc_kind_v = g->ops.mm.get_kind_invalid();
882
883 /*
884 * Find a suitable incompressible kind if it becomes necessary later.
885 */
886 kind_compressible = gk20a_kind_is_compressible(binfo->kind_v);
887 if (kind_compressible) {
888 binfo->uc_kind_v = gk20a_get_uncompressed_kind(binfo->kind_v);
889 if (binfo->uc_kind_v == g->ops.mm.get_kind_invalid()) {
890 /*
891 * Shouldn't happen, but it is worth cross-checking.
892 */
893 nvgpu_err(g, "comptag kind 0x%x can't be"
894 " downgraded to uncompressed kind",
895 binfo->kind_v);
896 return -EINVAL;
897 }
898 }
899
900 *pkind_compressible = kind_compressible;
901
902 return 0;
903}
904
905int nvgpu_vm_compute_kind_and_compression(struct vm_gk20a *vm,
906 struct nvgpu_ctag_buffer_info *binfo)
907{
908 bool kind_compressible;
909 struct gk20a *g = gk20a_from_vm(vm);
910 int ctag_granularity = g->ops.fb.compression_page_size(g);
911
912 if (!binfo->use_kind_v)
913 binfo->kind_v = g->ops.mm.get_kind_invalid();
914 if (!binfo->use_uc_kind_v)
915 binfo->uc_kind_v = g->ops.mm.get_kind_invalid();
916
917 if (binfo->flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
918 kind_compressible = (binfo->kind_v !=
919 g->ops.mm.get_kind_invalid());
920 if (!kind_compressible)
921 binfo->kind_v = binfo->uc_kind_v;
922 } else {
923 int err = nvgpu_vm_setup_kind_legacy(vm, binfo,
924 &kind_compressible);
925
926 if (err)
927 return err;
928 }
929
930 /* comptags only supported for suitable kinds, 128KB pagesize */
931 if (kind_compressible &&
932 vm->gmmu_page_sizes[binfo->pgsz_idx] <
933 g->ops.fb.compressible_page_size(g)) {
934 /* it is safe to fall back to uncompressed as
935 functionality is not harmed */
936 binfo->kind_v = binfo->uc_kind_v;
937 kind_compressible = false;
938 }
939
940 if (kind_compressible)
941 binfo->ctag_lines = DIV_ROUND_UP_ULL(binfo->size,
942 ctag_granularity);
943 else
944 binfo->ctag_lines = 0;
945
946 binfo->use_kind_v = (binfo->kind_v != g->ops.mm.get_kind_invalid());
947 binfo->use_uc_kind_v = (binfo->uc_kind_v !=
948 g->ops.mm.get_kind_invalid());
949
950 return 0;
951}