author    Deepak Nibade <dnibade@nvidia.com>    2016-09-09 02:48:14 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2016-09-26 06:33:49 -0400
commit    5c049b5c793a14a3cb936b23e503b07e6ac5c0d6 (patch)
tree      0b3ef99e668ff663e74681bd17e14b57ef8f1e88 /drivers
parent    7b9bf036f44420d4cf74656481ba782de6689978 (diff)
gpu: nvgpu: fix allocation and map size mismatch while mapping
It is possible to allocate a larger size than the user requested, e.g. if we allocate at 64k granularity and the user asks for a 32k buffer, we end up allocating a 64k chunk. The user still asks to map the buffer with size 32k, and hence we reserve mapping addresses only for 32k. But due to a bug in update_gmmu_ptes_locked() we end up creating mappings for the full 64k chunk and corrupt some mappings.

Fix this by considering min(chunk->length, map_size) while mapping the address range for a chunk.

Also, map_size will be zero once we have mapped all of the requested address range, so bail out of the loop when map_size reaches zero.

Bug 1805064

Change-Id: I125d3ce261684dce7e679f9cb39198664f8937c4
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1217755
(cherry picked from commit 3ee1c6bc0718fb8dd9a28a37eff43a2872bdd5c0)
Reviewed-on: http://git-master/r/1221775
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bharat Nihalani <bnihalani@nvidia.com>
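The sketch below is a minimal, standalone illustration of the idea in the commit message, not the nvgpu code itself: all names (map_chunks, struct chunk, ALLOC_GRANULARITY) are hypothetical. It shows why each chunk's length must be clamped to the remaining map size when the allocator rounds a 32k request up to a 64k chunk, and why the loop can stop once the remaining size hits zero.

	/* Hypothetical sketch; not the driver's actual mapping code. */
	#include <stdio.h>
	#include <stdint.h>

	#define ALLOC_GRANULARITY (64 * 1024)   /* allocator hands out 64k chunks */

	struct chunk {
		uint64_t base;
		uint64_t length;
	};

	static inline uint64_t min_u64(uint64_t a, uint64_t b)
	{
		return a < b ? a : b;
	}

	/* Map 'map_size' bytes starting at 'gpu_va' from a list of chunks. */
	static void map_chunks(const struct chunk *chunks, int nr_chunks,
			       uint64_t gpu_va, uint64_t map_size)
	{
		for (int i = 0; i < nr_chunks && map_size; i++) {
			/*
			 * Without this clamp, a 64k chunk would be mapped in
			 * full even though the user only asked for 32k,
			 * writing PTEs past the reserved VA range.
			 */
			uint64_t length = min_u64(chunks[i].length, map_size);

			printf("map VA 0x%llx..0x%llx (%llu bytes)\n",
			       (unsigned long long)gpu_va,
			       (unsigned long long)(gpu_va + length),
			       (unsigned long long)length);

			gpu_va += length;
			map_size -= length;   /* reaches zero once the request is covered */
		}
	}

	int main(void)
	{
		/* User asked for 32k; the allocator rounded up to one 64k chunk. */
		struct chunk chunks[] = {
			{ .base = 0x100000, .length = ALLOC_GRANULARITY },
		};

		map_chunks(chunks, 1, 0x40000000ULL, 32 * 1024);
		return 0;
	}

Running this maps exactly 32k of VA and then stops, mirroring the clamping and early-exit behaviour the patch adds to update_gmmu_ptes_locked().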
Diffstat (limited to 'drivers')
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c | 8 ++++++++
 1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index a0e88c3e..1551dd16 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -3749,6 +3749,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 	u64 ctag = (u64)ctag_offset * (u64)ctag_granularity;
 	u64 iova = 0;
 	u64 space_to_skip = buffer_offset;
+	u64 map_size = gpu_end - gpu_va;
 	u32 page_size = vm->gmmu_page_sizes[pgsz_idx];
 	int err;
 	struct scatterlist *sgl = NULL;
@@ -3787,6 +3788,7 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 		} else {
 			iova = chunk->base + space_to_skip;
 			length = chunk->length - space_to_skip;
+			length = min(length, map_size);
 			space_to_skip = 0;
 
 			err = update_gmmu_level_locked(vm,
@@ -3799,10 +3801,16 @@ static int update_gmmu_ptes_locked(struct vm_gk20a *vm,
 				cacheable, unmapped_pte,
 				rw_flag, sparse, 0, priv,
 				aperture);
+			if (err)
+				break;
 
 			/* need to set explicit zero here */
 			space_to_skip = 0;
 			gpu_va += length;
+			map_size -= length;
+
+			if (!map_size)
+				break;
 		}
 	}
 } else {