Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 49 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 40 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index a6507d2d..97b7aa80 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -822,14 +822,11 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
-int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
-				      u32 flags,
-				      struct buffer_attrs *bfr,
-				      enum gmmu_pgsz_gk20a pgsz_idx)
+static int setup_kind_legacy(struct vm_gk20a *vm, struct buffer_attrs *bfr,
+			     bool *pkind_compressible)
 {
-	bool kind_compressible;
 	struct gk20a *g = gk20a_from_vm(vm);
-	int ctag_granularity = g->ops.fb.compression_page_size(g);
+	bool kind_compressible;
 
 	if (unlikely(bfr->kind_v == gmmu_pte_kind_invalid_v()))
 		bfr->kind_v = gmmu_pte_kind_pitch_v();
@@ -840,7 +837,7 @@ int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 	}
 
 	bfr->uc_kind_v = gmmu_pte_kind_invalid_v();
-	/* find a suitable uncompressed kind if it becomes necessary later */
+	/* find a suitable incompressible kind if it becomes necessary later */
 	kind_compressible = gk20a_kind_is_compressible(bfr->kind_v);
 	if (kind_compressible) {
 		bfr->uc_kind_v = gk20a_get_uncompressed_kind(bfr->kind_v);
@@ -852,6 +849,36 @@ int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 			return -EINVAL;
 		}
 	}
+
+	*pkind_compressible = kind_compressible;
+	return 0;
+}
+
+int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
+				      u32 flags,
+				      struct buffer_attrs *bfr,
+				      enum gmmu_pgsz_gk20a pgsz_idx)
+{
+	bool kind_compressible;
+	struct gk20a *g = gk20a_from_vm(vm);
+	int ctag_granularity = g->ops.fb.compression_page_size(g);
+
+	if (!bfr->use_kind_v)
+		bfr->kind_v = gmmu_pte_kind_invalid_v();
+	if (!bfr->use_uc_kind_v)
+		bfr->uc_kind_v = gmmu_pte_kind_invalid_v();
+
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
+		kind_compressible = (bfr->kind_v != gmmu_pte_kind_invalid_v());
+		if (!kind_compressible)
+			bfr->kind_v = bfr->uc_kind_v;
+	} else {
+		int err = setup_kind_legacy(vm, bfr, &kind_compressible);
+
+		if (err)
+			return err;
+	}
+
 	/* comptags only supported for suitable kinds, 128KB pagesize */
 	if (kind_compressible &&
 	    vm->gmmu_page_sizes[pgsz_idx] < g->ops.fb.compressible_page_size(g)) {
@@ -865,6 +892,9 @@ int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
 	else
 		bfr->ctag_lines = 0;
 
+	bfr->use_kind_v = (bfr->kind_v != gmmu_pte_kind_invalid_v());
+	bfr->use_uc_kind_v = (bfr->uc_kind_v != gmmu_pte_kind_invalid_v());
+
 	return 0;
 }
 
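Note: the hunks above split the legacy kind selection out of setup_buffer_kind_and_compression() into setup_kind_legacy(), and only take it when NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is clear. As a rough sketch of how a caller might seed struct buffer_attrs for the new direct-kind path (the field names come from the diff; the helper name and the "negative s16 means unspecified" convention are assumptions, not part of this patch):

/* Hypothetical helper; assumes a negative s16 kind means "not specified". */
static void seed_buffer_kinds(struct buffer_attrs *bfr,
			      s16 compr_kind, s16 incompr_kind)
{
	/* use_kind_v/use_uc_kind_v tell setup_buffer_kind_and_compression()
	 * which kinds the caller actually supplied; it resets the unused
	 * ones to gmmu_pte_kind_invalid_v() before applying the
	 * DIRECT_KIND_CTRL logic. */
	bfr->use_kind_v = (compr_kind >= 0);
	if (bfr->use_kind_v)
		bfr->kind_v = (u8)compr_kind;

	bfr->use_uc_kind_v = (incompr_kind >= 0);
	if (bfr->use_uc_kind_v)
		bfr->uc_kind_v = (u8)incompr_kind;
}

On return, use_kind_v/use_uc_kind_v are rewritten to reflect the kinds that were finally chosen, so later mapping code can test the flags instead of comparing against gmmu_pte_kind_invalid_v() again.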
@@ -1649,7 +1679,8 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 			int dmabuf_fd,
 			u64 *offset_align,
 			u32 flags, /*NVGPU_AS_MAP_BUFFER_FLAGS_*/
-			int kind,
+			s16 compr_kind,
+			s16 incompr_kind,
 			u64 buffer_offset,
 			u64 mapping_size,
 			struct vm_gk20a_mapping_batch *batch)
@@ -1690,7 +1721,7 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 	}
 
 	ret_va = nvgpu_vm_map(vm, dmabuf, *offset_align,
-			      flags, kind, true,
+			      flags, compr_kind, incompr_kind, true,
 			      gk20a_mem_flag_none,
 			      buffer_offset,
 			      mapping_size,
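The final two hunks widen nvgpu_vm_map_buffer() from a single int kind to an s16 compressible/incompressible pair, which it now forwards to nvgpu_vm_map(). A sketch of a migrated call site, again assuming a negative s16 acts as the "no kind" sentinel (variable names are illustrative, not from this diff):

/* Old: nvgpu_vm_map_buffer(vm, fd, &va, flags, kind, off, size, batch);
 * New: the compressible and incompressible kinds travel separately. */
err = nvgpu_vm_map_buffer(vm, dmabuf_fd, &offset_align,
			  NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL,
			  compr_kind,	/* s16 compressible kind, or < 0 */
			  incompr_kind,	/* s16 incompressible fallback kind */
			  buffer_offset, mapping_size, batch);
if (err)
	return err;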