Diffstat (limited to 'drivers/gpu/nvgpu/common/mm')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/pd_cache.c	16
1 file changed, 15 insertions, 1 deletion
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index f0abc03d..4c3e06ba 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -143,6 +143,7 @@ int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
 				  struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
 	int err;
+	unsigned long flags = 0;
 
 	pd_dbg(g, "PD-Alloc [D] %u bytes", bytes);
 
@@ -152,7 +153,20 @@ int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
 		return -ENOMEM;
 	}
 
-	err = nvgpu_dma_alloc(g, bytes, pd->mem);
+	/*
+	 * If bytes == PAGE_SIZE then it's impossible to get a discontiguous DMA
+	 * allocation. Some DMA implementations may, despite this fact, still
+	 * use the contiguous pool for page sized allocations. As such only
+	 * request explicitly contiguous allocs if the page directory is larger
+	 * than the page size. Also, of course, this is all only relevant for
+	 * GPUs not using an IOMMU. If there is an IOMMU, DMA allocs are always
+	 * going to be virtually contiguous and we don't have to force the
+	 * underlying allocations to be physically contiguous as well.
+	 */
+	if (!nvgpu_iommuable(g) && bytes > PAGE_SIZE)
+		flags = NVGPU_DMA_FORCE_CONTIGUOUS;
+
+	err = nvgpu_dma_alloc_flags(g, flags, bytes, pd->mem);
 	if (err) {
 		nvgpu_err(g, "OOM allocating page directory!");
 		nvgpu_kfree(g, pd->mem);
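
For context, the decision the new comment describes reduces to a small predicate. Below is a minimal user-space C sketch of that flag-selection logic; the PAGE_SIZE value, the flag constant's value, and the pd_alloc_flags helper are illustrative stand-ins, not the driver's actual definitions:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins; the real values live in the nvgpu headers. */
#define PAGE_SIZE                  4096UL
#define NVGPU_DMA_FORCE_CONTIGUOUS (1UL << 0)

/*
 * Mirrors the patch's decision: force a physically contiguous DMA
 * allocation only when (a) there is no IOMMU to stitch scattered pages
 * into one contiguous GPU view, and (b) the page directory spans more
 * than one page -- a single-page allocation is contiguous by definition.
 */
static unsigned long pd_alloc_flags(bool iommuable, unsigned long bytes)
{
	if (!iommuable && bytes > PAGE_SIZE)
		return NVGPU_DMA_FORCE_CONTIGUOUS;
	return 0;
}

int main(void)
{
	/* No IOMMU, one page: contiguous by definition, no flag needed. */
	printf("%lu\n", pd_alloc_flags(false, PAGE_SIZE));
	/* No IOMMU, four pages: must force physical contiguity. */
	printf("%lu\n", pd_alloc_flags(false, 4 * PAGE_SIZE));
	/* IOMMU present: virtual contiguity is guaranteed either way. */
	printf("%lu\n", pd_alloc_flags(true, 4 * PAGE_SIZE));
	return 0;
}

The design point is that forcing contiguity is the expensive path (it draws from the contiguous pool), so the patch requests it only when it is actually needed: multi-page page directories on GPUs without an IOMMU.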