summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorAlex Waterman <alexw@nvidia.com>2018-04-18 16:14:50 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2018-06-22 13:03:55 -0400
commita6e3403f460e835cd35d1b82e826aa440e5378e3 (patch)
tree63b66643d6b859e33f6ce5e340d5543d117c4c7d /drivers
parent2bc421dd52f95aead2076c152d8da3bf698e3c1a (diff)
gpu: nvgpu: Update __get_pte_size() to check IOMMU-ability
gpu: nvgpu: Update __get_pte_size() to check IOMMU-ability When generating the PTE size for a given mapping the code must consider whether the GPU is being IOMMU'ed. The presence and usage of an IOMMU implies the buffers will appear contiguous to the GPU. Without an IOMMU we cannot assume that and therefore must use small pages regardless of the size of the buffer to be mapped. Bug 2011640 Change-Id: I6c64cbcd8844a7ed855116754b795d949a3003af Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1697891 Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/nvgpu/common/mm/mm.c8
1 files changed, 6 insertions, 2 deletions
diff --git a/drivers/gpu/nvgpu/common/mm/mm.c b/drivers/gpu/nvgpu/common/mm/mm.c
index 3c083ab8..c9b2b493 100644
--- a/drivers/gpu/nvgpu/common/mm/mm.c
+++ b/drivers/gpu/nvgpu/common/mm/mm.c
@@ -77,10 +77,13 @@ static enum gmmu_pgsz_gk20a __get_pte_size_split_addr(struct vm_gk20a *vm,
77 * - Attempt to find a reserved memory area and use the page size 77 * - Attempt to find a reserved memory area and use the page size
78 * based on that. 78 * based on that.
79 * - If no reserved page size is available, default to small pages. 79 * - If no reserved page size is available, default to small pages.
80 * o If the base is zero: 80 * o If the base is zero and we have an SMMU:
81 * - If the size is larger than or equal to the big page size, use big 81 * - If the size is larger than or equal to the big page size, use big
82 * pages. 82 * pages.
83 * - Otherwise use small pages. 83 * - Otherwise use small pages.
84 * o If there's no SMMU:
85 * - Regardless of buffer size use small pages since we have no
86 * - guarantee of contiguity.
84 */ 87 */
85enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size) 88enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
86{ 89{
@@ -95,7 +98,8 @@ enum gmmu_pgsz_gk20a __get_pte_size(struct vm_gk20a *vm, u64 base, u64 size)
95 if (base) 98 if (base)
96 return __get_pte_size_fixed_map(vm, base, size); 99 return __get_pte_size_fixed_map(vm, base, size);
97 100
98 if (size >= vm->gmmu_page_sizes[gmmu_page_size_big]) 101 if (size >= vm->gmmu_page_sizes[gmmu_page_size_big] &&
102 nvgpu_iommuable(g))
99 return gmmu_page_size_big; 103 return gmmu_page_size_big;
100 return gmmu_page_size_small; 104 return gmmu_page_size_small;
101} 105}