From e26ce10cc6b59314ccf5931a8c5b46a9e57b085a Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Thu, 5 Oct 2017 17:22:41 -0700
Subject: gpu: nvgpu: Convert VIDMEM work_struct to thread

Convert the work_struct used by the vidmem background clearing to a
thread to make it more cross-platform. The thread waits on a condition
variable to determine when work needs to be done. The signal comes from
the DMA API when it enqueues a new nvgpu_mem that needs clearing.

Add logic for handling suspend: the CE cannot be accessed while the GPU
is suspended, so the background thread must be paused while the GPU is
suspended and the CE is unavailable.

Several other changes were also made:

  o Move the code that enqueues an nvgpu_mem from the DMA API code to a
    function in the VIDMEM code.
  o Move nvgpu_vidmem_get_pending_alloc() to the Linux-specific code as
    this function is only used there. It's a trivial function that QNX
    can easily implement as well.
  o Remove the was_empty logic from the enqueue. Now just always signal
    the condition variable when a new nvgpu_mem comes in.
  o Move CE suspend to after MM suspend.

JIRA NVGPU-30
JIRA NVGPU-138

Change-Id: Ie9286ae5a127c3fced86dfb9794e7d81eab0491c
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master.nvidia.com/r/1574498
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/common/linux/dma.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/nvgpu/common/linux/dma.c b/drivers/gpu/nvgpu/common/linux/dma.c
index b62c4593..9e9d1007 100644
--- a/drivers/gpu/nvgpu/common/linux/dma.c
+++ b/drivers/gpu/nvgpu/common/linux/dma.c
@@ -514,7 +514,6 @@ static void nvgpu_dma_free_sys(struct gk20a *g, struct nvgpu_mem *mem)
 static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 {
 #if defined(CONFIG_GK20A_VIDMEM)
-	bool was_empty;
 	size_t mem_size = mem->size;
 
 	dma_dbg_free(g, mem->size, mem->priv.flags, "vidmem");
@@ -523,18 +522,19 @@ static void nvgpu_dma_free_vid(struct gk20a *g, struct nvgpu_mem *mem)
 	WARN_ON(mem->priv.flags != NVGPU_DMA_NO_KERNEL_MAPPING);
 
 	if (mem->mem_flags & NVGPU_MEM_FLAG_USER_MEM) {
-		nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
-		was_empty = nvgpu_list_empty(&g->mm.vidmem.clear_list_head);
-		nvgpu_list_add_tail(&mem->clear_list_entry,
-				&g->mm.vidmem.clear_list_head);
-		atomic64_add(mem->aligned_size,
-				&g->mm.vidmem.bytes_pending.atomic_var);
-		nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
-
-		if (was_empty) {
-			cancel_work_sync(&g->mm.vidmem.clear_mem_worker);
-			schedule_work(&g->mm.vidmem.clear_mem_worker);
-		}
+		int err = nvgpu_vidmem_clear_list_enqueue(g, mem);
+
+		/*
+		 * If there's an error here then that means we can't clear the
+		 * vidmem. That's too bad; however, we still own the nvgpu_mem
+		 * buf so we have to free that.
+		 *
+		 * We don't need to worry about the vidmem allocator itself
+		 * since when that gets cleaned up in the driver shutdown path
+		 * all the outstanding allocs are force freed.
+		 */
+		if (err)
+			nvgpu_kfree(g, mem);
 	} else {
 		nvgpu_memset(g, mem, 0, 0, mem->aligned_size);
 		nvgpu_free(mem->allocator,
-- 
cgit v1.2.2
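
For readers who want to see the shape of the pattern outside the driver: below is a
minimal, self-contained sketch of what the commit message describes (a background clear
thread sleeping on a condition variable, signalled unconditionally by the enqueue path,
with a stop flag standing in for the suspend pause). It deliberately uses plain pthreads
rather than nvgpu's own thread and condition-variable wrappers, which do not appear in
this hunk; every name in it (clear_ctx, clear_buf, clear_list_enqueue, clear_thread) is
hypothetical and merely stands in for the driver's actual structures such as
g->mm.vidmem.clear_list_head and nvgpu_vidmem_clear_list_enqueue().

/*
 * Illustrative sketch only: background-clear worker driven by a condition
 * variable. Not the nvgpu implementation; plain pthreads, hypothetical names.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct clear_buf {
	struct clear_buf *next;
	size_t size;
};

struct clear_ctx {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	struct clear_buf *head;	/* pending list of buffers to clear */
	bool stop;		/* stand-in for suspend/shutdown */
};

/* Free path: enqueue a buffer and always signal (no was_empty check). */
static void clear_list_enqueue(struct clear_ctx *ctx, struct clear_buf *buf)
{
	pthread_mutex_lock(&ctx->lock);
	buf->next = ctx->head;
	ctx->head = buf;
	pthread_cond_signal(&ctx->cond);
	pthread_mutex_unlock(&ctx->lock);
}

/* Background thread: sleep on the condvar until there is work or stop. */
static void *clear_thread(void *arg)
{
	struct clear_ctx *ctx = arg;

	pthread_mutex_lock(&ctx->lock);
	for (;;) {
		while (!ctx->head && !ctx->stop)
			pthread_cond_wait(&ctx->cond, &ctx->lock);
		if (ctx->stop && !ctx->head)
			break;

		/* Pop one buffer and process it outside the lock. */
		struct clear_buf *buf = ctx->head;
		ctx->head = buf->next;
		pthread_mutex_unlock(&ctx->lock);

		printf("clearing %zu bytes\n", buf->size); /* stand-in for the CE clear */
		free(buf);

		pthread_mutex_lock(&ctx->lock);
	}
	pthread_mutex_unlock(&ctx->lock);
	return NULL;
}

int main(void)
{
	struct clear_ctx ctx = { 0 };
	pthread_t tid;

	pthread_mutex_init(&ctx.lock, NULL);
	pthread_cond_init(&ctx.cond, NULL);
	pthread_create(&tid, NULL, clear_thread, &ctx);

	struct clear_buf *buf = calloc(1, sizeof(*buf));
	if (buf) {
		buf->size = 4096;
		clear_list_enqueue(&ctx, buf);
	}

	/* "Suspend": ask the thread to drain the list and park/exit. */
	pthread_mutex_lock(&ctx.lock);
	ctx.stop = true;
	pthread_cond_signal(&ctx.cond);
	pthread_mutex_unlock(&ctx.lock);
	pthread_join(tid, NULL);

	pthread_cond_destroy(&ctx.cond);
	pthread_mutex_destroy(&ctx.lock);
	return 0;
}

The sketch also shows why the was_empty optimisation can be dropped: signalling a
condition variable with no waiter is cheap and harmless, so the enqueue path can signal
unconditionally instead of tracking whether the list transitioned from empty.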