From e620bbccdd9ec520ed74282ed7905c7b3ef03d4a Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Mon, 6 Nov 2017 16:47:15 -0800
Subject: gpu: nvgpu: Request CONTIG allocs for large PDs

Request explicitly contiguous DMA memory for large page directory
allocations. Large in this case means greater than PAGE_SIZE. This is
necessary if the GPU's DMA allocator is set up to allocate
discontiguous memory by default.

Bug 2015747

Change-Id: I3afe9c2990522058f6aa45f28030bc82a369ca69
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master.nvidia.com/r/1593093
Reviewed-by: svc-mobile-coverity
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta
Reviewed-by: Terje Bergstrom
Reviewed-by: mobile promotions
Tested-by: mobile promotions
---
 drivers/gpu/nvgpu/common/mm/pd_cache.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index f0abc03d..4c3e06ba 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -143,6 +143,7 @@ int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
                                   struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
         int err;
+        unsigned long flags = 0;
 
         pd_dbg(g, "PD-Alloc [D] %u bytes", bytes);
 
@@ -152,7 +153,20 @@ int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
                 return -ENOMEM;
         }
 
-        err = nvgpu_dma_alloc(g, bytes, pd->mem);
+        /*
+         * If bytes == PAGE_SIZE then it's impossible to get a discontiguous DMA
+         * allocation. Some DMA implementations may, despite this fact, still
+         * use the contiguous pool for page sized allocations. As such only
+         * request explicitly contiguous allocs if the page directory is larger
+         * than the page size. Also, of course, this is all only relevant for
+         * GPUs not using an IOMMU. If there is an IOMMU, DMA allocs are always
+         * going to be virtually contiguous and we don't have to force the
+         * underlying allocations to be physically contiguous as well.
+         */
+        if (!nvgpu_iommuable(g) && bytes > PAGE_SIZE)
+                flags = NVGPU_DMA_FORCE_CONTIGUOUS;
+
+        err = nvgpu_dma_alloc_flags(g, flags, bytes, pd->mem);
         if (err) {
                 nvgpu_err(g, "OOM allocating page directory!");
                 nvgpu_kfree(g, pd->mem);
-- 
cgit v1.2.2
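
The standalone C sketch below mirrors the decision this patch adds: force a
physically contiguous allocation only when the page directory is larger than
one page and no IOMMU is present. The SKETCH_* constants and the has_iommu
parameter are illustrative stand-ins, not nvgpu definitions; the driver itself
uses PAGE_SIZE, NVGPU_DMA_FORCE_CONTIGUOUS, nvgpu_iommuable(), and
nvgpu_dma_alloc_flags().

#include <stdbool.h>
#include <stdio.h>

/* Stand-in values for illustration only; not the driver's definitions. */
#define SKETCH_PAGE_SIZE         4096UL
#define SKETCH_FORCE_CONTIGUOUS  (1UL << 0)

/*
 * Mirror of the patch's logic: only force physically contiguous memory when
 * the PD spans more than one page *and* there is no IOMMU to stitch
 * discontiguous pages into a virtually contiguous range.
 */
static unsigned long pd_alloc_flags(bool has_iommu, unsigned long bytes)
{
        unsigned long flags = 0;

        if (!has_iommu && bytes > SKETCH_PAGE_SIZE)
                flags = SKETCH_FORCE_CONTIGUOUS;

        return flags;
}

int main(void)
{
        /* 4 KiB PD, no IOMMU: fits in one page, no need to force contig. */
        printf("4K,  no IOMMU -> flags=%lu\n", pd_alloc_flags(false, 4096));
        /* 64 KiB PD, no IOMMU: must be physically contiguous. */
        printf("64K, no IOMMU -> flags=%lu\n", pd_alloc_flags(false, 65536));
        /* 64 KiB PD behind an IOMMU: IOMMU provides virtual contiguity. */
        printf("64K, IOMMU    -> flags=%lu\n", pd_alloc_flags(true, 65536));
        return 0;
}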