Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/pd_cache.c')
-rw-r--r--  drivers/gpu/nvgpu/common/mm/pd_cache.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/mm/pd_cache.c b/drivers/gpu/nvgpu/common/mm/pd_cache.c
index 77e20c38..dae6d34e 100644
--- a/drivers/gpu/nvgpu/common/mm/pd_cache.c
+++ b/drivers/gpu/nvgpu/common/mm/pd_cache.c
@@ -64,10 +64,10 @@
  * lists. For a 4Kb page NVGPU_PD_CACHE_COUNT is 4. This is enough space for
  * 256, 512, 1024, and 2048 byte PDs.
  *
- * __nvgpu_pd_alloc() will allocate a PD for the GMMU. It will check if the PD
+ * nvgpu_pd_alloc() will allocate a PD for the GMMU. It will check if the PD
  * size is page size or larger and choose the correct allocation scheme - either
- * from the PD cache or directly. Similarly __nvgpu_pd_free() will free a PD
- * allocated by __nvgpu_pd_alloc().
+ * from the PD cache or directly. Similarly nvgpu_pd_free() will free a PD
+ * allocated by nvgpu_pd_alloc().
  *
  * Since the top level PD (the PDB) is a page aligned pointer but less than a
  * page size the direct functions must be used for allocating PDBs. Otherwise
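
The hunk above also documents the cache layout: with a 4Kb page, NVGPU_PD_CACHE_COUNT is 4, one list per power-of-two PD size from 256 to 2048 bytes. Below is a minimal standalone sketch of that size-to-list mapping, not the driver source; pd_cache_list_index, PD_CACHE_MIN and PD_PAGE_SIZE are illustrative names, and sizes are assumed to be one of the four cached powers of two.

#include <assert.h>
#include <stdio.h>

#define PD_CACHE_MIN   256u   /* smallest cached PD size */
#define PD_CACHE_COUNT 4u     /* lists for 256, 512, 1024 and 2048 bytes */
#define PD_PAGE_SIZE   4096u  /* the 4Kb-page case described above */

/* Map a PD size to its cache list; page-size-or-larger PDs bypass the cache. */
static int pd_cache_list_index(unsigned int bytes)
{
        unsigned int idx = 0;
        unsigned int sz = PD_CACHE_MIN;

        if (bytes >= PD_PAGE_SIZE)
                return -1;      /* caller allocates directly instead */

        while (sz < bytes) {    /* assumes bytes is a cached power of two */
                sz <<= 1u;
                idx++;
        }
        assert(idx < PD_CACHE_COUNT);
        return (int)idx;
}

int main(void)
{
        printf("256B  -> list %d\n", pd_cache_list_index(256u));   /* 0 */
        printf("512B  -> list %d\n", pd_cache_list_index(512u));   /* 1 */
        printf("2048B -> list %d\n", pd_cache_list_index(2048u));  /* 3 */
        printf("4096B -> %d (direct)\n", pd_cache_list_index(4096u));
        return 0;
}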
@@ -150,8 +150,8 @@ void nvgpu_pd_cache_fini(struct gk20a *g)
  * Note: this does not need the cache lock since it does not modify any of the
  * PD cache data structures.
  */
-int __nvgpu_pd_cache_alloc_direct(struct gk20a *g,
-                                  struct nvgpu_gmmu_pd *pd, u32 bytes)
+int nvgpu_pd_cache_alloc_direct(struct gk20a *g,
+                                struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
         int err;
         unsigned long flags = 0;
@@ -339,7 +339,7 @@ static int nvgpu_pd_cache_alloc(struct gk20a *g, struct nvgpu_pd_cache *cache,
  * cache logistics. Since on Parker and later GPUs some of the page directories
  * are smaller than a page packing these PDs together saves a lot of memory.
  */
-int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
+int nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
 {
         struct gk20a *g = gk20a_from_vm(vm);
         int err;
@@ -349,7 +349,7 @@ int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
          * alloc.
          */
         if (bytes >= PAGE_SIZE) {
-                err = __nvgpu_pd_cache_alloc_direct(g, pd, bytes);
+                err = nvgpu_pd_cache_alloc_direct(g, pd, bytes);
                 if (err) {
                         return err;
                 }
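
The test in this hunk is the whole allocation dispatch: at PAGE_SIZE or above the PD is allocated directly, below it the sub-page cache is used. A minimal standalone model of that decision follows, assuming a 4Kb page; pd_choose_path and MODEL_PAGE_SIZE are illustrative names, not nvgpu API.

#include <stddef.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u

enum pd_alloc_path { PD_PATH_DIRECT, PD_PATH_CACHED };

/* Mirrors the 'if (bytes >= PAGE_SIZE)' test shown in the hunk above. */
static enum pd_alloc_path pd_choose_path(size_t bytes)
{
        return bytes >= MODEL_PAGE_SIZE ? PD_PATH_DIRECT : PD_PATH_CACHED;
}

int main(void)
{
        printf("2048 -> %s\n",
               pd_choose_path(2048) == PD_PATH_CACHED ? "cached" : "direct");
        printf("4096 -> %s\n",
               pd_choose_path(4096) == PD_PATH_DIRECT ? "direct" : "cached");
        return 0;
}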
@@ -368,7 +368,7 @@ int __nvgpu_pd_alloc(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd, u32 bytes)
         return err;
 }
 
-void __nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
+void nvgpu_pd_cache_free_direct(struct gk20a *g, struct nvgpu_gmmu_pd *pd)
 {
         pd_dbg(g, "PD-Free [D] 0x%p", pd->mem);
 
@@ -448,7 +448,7 @@ static void nvgpu_pd_cache_free(struct gk20a *g, struct nvgpu_pd_cache *cache,
         nvgpu_pd_cache_do_free(g, cache, pentry, pd);
 }
 
-void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
+void nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
 {
         struct gk20a *g = gk20a_from_vm(vm);
 
@@ -456,7 +456,7 @@ void __nvgpu_pd_free(struct vm_gk20a *vm, struct nvgpu_gmmu_pd *pd)
          * Simple case: just DMA free.
          */
         if (!pd->cached) {
-                return __nvgpu_pd_cache_free_direct(g, pd);
+                return nvgpu_pd_cache_free_direct(g, pd);
         }
 
         nvgpu_mutex_acquire(&g->mm.pd_cache->lock);
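
nvgpu_pd_free() mirrors that dispatch on the free side: the pd->cached flag set at allocation time selects the path, and only the cached path takes the pd_cache lock, consistent with the earlier note that the direct path never touches shared cache state. Below is a standalone sketch of that locking discipline, not the driver code; pthread_mutex_t stands in for nvgpu_mutex and all names are illustrative.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct pd_model {
        void *mem;
        bool cached;    /* set at alloc time: false => direct, true => cached */
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static void pd_free_model(struct pd_model *pd)
{
        if (!pd->cached) {
                /* Direct case: nothing shared is touched, so no lock. */
                free(pd->mem);
                pd->mem = NULL;
                return;
        }

        /*
         * Cached case: the shared lists are modified, so the same lock
         * guarding the cached-alloc path must be held.
         */
        pthread_mutex_lock(&cache_lock);
        free(pd->mem);  /* stands in for returning the slot to a list */
        pd->mem = NULL;
        pthread_mutex_unlock(&cache_lock);
}

int main(void)
{
        struct pd_model direct = { .mem = malloc(4096), .cached = false };
        struct pd_model cached = { .mem = malloc(256), .cached = true };

        pd_free_model(&direct);
        pd_free_model(&cached);
        return 0;
}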