summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/common/mm/pd_cache.c
diff options
context:
space:
mode:
author	Alex Waterman <alexw@nvidia.com>	2017-07-26 17:35:48 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-07-31 02:11:42 -0400
commit	43ae97000be786e4118d431637f05b1462e296c4 (patch)
tree	172a912727c25ac20ed64373b9220afa1a610a53 /drivers/gpu/nvgpu/common/mm/pd_cache.c
parent	04470a984d3e91ea08b4b2a4db11341e7ee84640 (diff)
gpu: nvgpu: Use non-contig mem in pd_cache
In the PD caching code use a non-contiguous DMA alloc for PAGE_SIZE and below allocations. There's no need for using the special contig pool of mem for these page sized allocs so wasting said mem can lead us to OOM problems pretty quickly (think large sparse textures, for example). Also turn several pd_dbg() statements for printing OOM errors into nvgpu_err()s since knowing exactly where an alloc fails is very convenient. Bug 200326705 Change-Id: Ib7c45020894d4bdd73cc92179ef707e472714d61 Signed-off-by: Alex Waterman <alexw@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/1527294 Reviewed-by: Automatic_Commit_Validation_User GVS: Gerrit_Virtual_Submit Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/pd_cache.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/pd_cache.c	16
1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index 4f312eff..2b6ca7e7 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -142,14 +142,13 @@ int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,

 	pd->mem = nvgpu_kzalloc(g, sizeof(*pd->mem));
 	if (!pd->mem) {
-		pd_dbg(g, "OOM allocating nvgpu_mem struct!");
+		nvgpu_err(g, "OOM allocating nvgpu_mem struct!");
 		return -ENOMEM;
 	}

-	err = nvgpu_dma_alloc_flags(g, NVGPU_DMA_FORCE_CONTIGUOUS,
-				    bytes, pd->mem);
+	err = nvgpu_dma_alloc(g, bytes, pd->mem);
 	if (err) {
-		pd_dbg(g, "OOM allocating page directory!");
+		nvgpu_err(g, "OOM allocating page directory!");
 		nvgpu_kfree(g, pd->mem);
 		return -ENOMEM;
 	}
@@ -175,14 +174,13 @@ static int nvgpu_pd_cache_alloc_new(struct gk20a *g,

 	pentry = nvgpu_kzalloc(g, sizeof(*pentry));
 	if (!pentry) {
-		pd_dbg(g, "OOM allocating pentry!");
+		nvgpu_err(g, "OOM allocating pentry!");
 		return -ENOMEM;
 	}

-	if (nvgpu_dma_alloc_flags(g, NVGPU_DMA_FORCE_CONTIGUOUS,
-				  PAGE_SIZE, &pentry->mem)) {
+	if (nvgpu_dma_alloc(g, PAGE_SIZE, &pentry->mem)) {
 		nvgpu_kfree(g, pentry);
-		pd_dbg(g, "Unable to DMA alloc!");
+		nvgpu_err(g, "Unable to DMA alloc!");
 		return -ENOMEM;
 	}
@@ -295,7 +293,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache,
 	err = nvgpu_pd_cache_alloc_from_partial(g, cache, pentry, pd);

 	if (err)
-		pd_dbg(g, "PD-Alloc [C] Failed!");
+		nvgpu_err(g, "PD-Alloc [C] Failed!");

 	return err;
 }