author    Alex Waterman <alexw@nvidia.com>  2017-11-06 19:47:15 -0500
committer mobile promotions <svcmobile_promotions@nvidia.com>  2017-11-08 13:37:00 -0500
commit    e620bbccdd9ec520ed74282ed7905c7b3ef03d4a (patch)
tree      db8155f64940ebb3d3d13d4c3ab2be1767663149 /drivers/gpu/nvgpu/common/mm
parent    3cb65f57d532d596bfb931f3e4b995004e36a129 (diff)
gpu: nvgpu: Request CONTIG allocs for large PDs
Request explicitly contiguous DMA memory for large page directory
allocations. Large in this case means greater than PAGE_SIZE. This is
necessary if the GPU's DMA allocator is configured to allocate
discontiguous memory by default.

Bug 2015747

Change-Id: I3afe9c2990522058f6aa45f28030bc82a369ca69
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1593093
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
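As a quick illustration of the rule described above, here is a minimal sketch of the flag selection in isolation. pd_alloc_flags() is a hypothetical helper invented for this example; nvgpu_iommuable(), NVGPU_DMA_FORCE_CONTIGUOUS, and PAGE_SIZE are taken from the patch below:

	/*
	 * Hypothetical helper illustrating the patch's decision rule:
	 * force physical contiguity only when the PD spans multiple
	 * pages and no IOMMU is available to make scattered pages
	 * appear contiguous to the GPU.
	 */
	static unsigned long pd_alloc_flags(struct gk20a *g, u32 bytes)
	{
		if (!nvgpu_iommuable(g) && bytes > PAGE_SIZE)
			return NVGPU_DMA_FORCE_CONTIGUOUS;

		return 0;
	}

Under this rule, a PAGE_SIZE PD on a non-IOMMU GPU keeps flags == 0 (a single page cannot be discontiguous), while a multi-page PD on the same GPU requests an explicitly contiguous allocation.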
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/pd_cache.c | 16 +++++++++++++++-
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index f0abc03d..4c3e06ba 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -143,6 +143,7 @@ int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
 				  struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
 	int err;
+	unsigned long flags = 0;
 
 	pd_dbg(g, "PD-Alloc [D] %u bytes", bytes);
 
@@ -152,7 +153,20 @@ int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
 		return -ENOMEM;
 	}
 
-	err = nvgpu_dma_alloc(g, bytes, pd->mem);
+	/*
+	 * If bytes == PAGE_SIZE then it's impossible to get a discontiguous DMA
+	 * allocation. Some DMA implementations may, despite this fact, still
+	 * use the contiguous pool for page-sized allocations. As such, only
+	 * request explicitly contiguous allocs if the page directory is larger
+	 * than the page size. Also, of course, this is all only relevant for
+	 * GPUs not using an IOMMU. If there is an IOMMU, DMA allocs are always
+	 * going to be virtually contiguous and we don't have to force the
+	 * underlying allocations to be physically contiguous as well.
+	 */
+	if (!nvgpu_iommuable(g) && bytes > PAGE_SIZE)
+		flags = NVGPU_DMA_FORCE_CONTIGUOUS;
+
+	err = nvgpu_dma_alloc_flags(g, flags, bytes, pd->mem);
 	if (err) {
 		nvgpu_err(g, "OOM allocating page directory!");
 		nvgpu_kfree(g, pd->mem);
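On the Linux backend, a flag such as NVGPU_DMA_FORCE_CONTIGUOUS plausibly translates into the kernel's DMA_ATTR_FORCE_CONTIGUOUS attribute. The sketch below shows how such a translation might look; pd_dma_alloc() is a hypothetical wrapper invented for illustration, and the mapping itself is an assumption about the backend, not something shown in this diff:

	#include <linux/dma-mapping.h>

	/*
	 * ASSUMPTION: this mapping is illustrative, not taken from the
	 * nvgpu sources. DMA_ATTR_FORCE_CONTIGUOUS asks the Linux DMA
	 * layer for physically contiguous backing even where the default
	 * policy would hand back scattered pages.
	 */
	static void *pd_dma_alloc(struct device *dev, size_t bytes,
				  unsigned long nvgpu_flags,
				  dma_addr_t *iova)
	{
		unsigned long attrs = 0;

		if (nvgpu_flags & NVGPU_DMA_FORCE_CONTIGUOUS)
			attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

		return dma_alloc_attrs(dev, bytes, iova, GFP_KERNEL, attrs);
	}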