path: root/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
author	Deepak Nibade <dnibade@nvidia.com>	2016-08-10 11:09:47 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2016-09-01 12:10:31 -0400
commit	6a48f4b3350f933b171edd4fac4a6860e53c2d64 (patch)
tree	2bdf546bfff5744f0c236acef02c2aa917e239cc /drivers/gpu/nvgpu/gk20a/mm_gk20a.h
parent	f79639f61858c377cf1f3facfc0ce631f787f0e6 (diff)
gpu: nvgpu: clear vidmem buffers in worker
We currently clear buffers allocated in vidmem in the buffer free path. Clearing a buffer requires submitting CE jobs, which can cause issues/races when the free is called from a critical path.

Solve this by moving the buffer clear/free to a worker: gk20a_gmmu_free_attr_vid() now just puts the mem_desc onto a list and schedules the worker, and the worker thread traverses the list and clears/frees the allocations.

In struct gk20a_vidmem_buf, the mem variable was statically allocated. Since freeing of mem is now delayed, convert it into a pointer and allocate it dynamically.

Because freeing of vidmem memory is delayed, it is now possible to hit OOM conditions during allocation. Hence, while allocating, block until sufficient memory is available, with an upper limit of 1S.

Jira DNVGPU-84

Change-Id: I7925590644afae50b6fc04c6e1e43bbaa1c220fd
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1201346
(cherry picked from commit b4dec4a30de2431369d677acca00e420f8e581a5)
Reviewed-on: http://git-master/r/1210950
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
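For illustration, a minimal sketch of the free-path side described above. The member names (clear_list_entry, clear_list_head, clear_list_mutex, vidmem_clear_mem_worker) come from the header diff below; the helper name enqueue_vidmem_clear() and the surrounding code are assumptions for illustration, not the driver's actual implementation in mm_gk20a.c.

/* Sketch only: defer the clear/free of a vidmem mem_desc to the worker.
 * Assumes mm->vidmem_clear_mem_worker was set up with INIT_WORK() during
 * mm init; types come from mm_gk20a.h plus <linux/list.h>, <linux/mutex.h>
 * and <linux/workqueue.h>.
 */
static void enqueue_vidmem_clear(struct mm_gk20a *mm, struct mem_desc *mem)
{
	mutex_lock(&mm->vidmem.clear_list_mutex);
	list_add_tail(&mem->clear_list_entry, &mm->vidmem.clear_list_head);
	mutex_unlock(&mm->vidmem.clear_list_mutex);

	/* Kick the worker; the actual clearing happens outside the free path. */
	schedule_work(&mm->vidmem_clear_mem_worker);
}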
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
-rw-r--r--	drivers/gpu/nvgpu/gk20a/mm_gk20a.h	5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index c6360955..54d3dfd0 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -71,6 +71,7 @@ struct mem_desc {
 	size_t size;
 	u64 gpu_va;
 	bool fixed; /* vidmem only */
+	struct list_head clear_list_entry; /* vidmem only */
 };
 
 struct mem_desc_sub {
@@ -414,7 +415,11 @@ struct mm_gk20a {
 
 		u32 ce_ctx_id;
 		bool cleared;
+
+		struct list_head clear_list_head;
+		struct mutex clear_list_mutex;
 	} vidmem;
+	struct work_struct vidmem_clear_mem_worker;
 };
 
 int gk20a_mm_init(struct mm_gk20a *mm);
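A plausible shape for the matching worker, given the members added above: it drains the list under clear_list_mutex and clears/frees each allocation outside the caller's critical path. This is a sketch under assumptions; the mm->g back-pointer, the clear helper gk20a_gmmu_clear_vidmem_mem(), and the final kfree() of the dynamically allocated mem_desc are illustrative and may not match the actual mm_gk20a.c code.

/* Sketch only: drain pending vidmem frees queued by the free path. */
static void gk20a_vidmem_clear_mem_worker(struct work_struct *work)
{
	struct mm_gk20a *mm = container_of(work, struct mm_gk20a,
					   vidmem_clear_mem_worker);
	struct gk20a *g = mm->g;	/* back-pointer assumed */
	struct mem_desc *mem;

	for (;;) {
		/* Pop one entry at a time so the lock is not held across
		 * the (potentially slow) CE clear. */
		mutex_lock(&mm->vidmem.clear_list_mutex);
		mem = list_first_entry_or_null(&mm->vidmem.clear_list_head,
					       struct mem_desc,
					       clear_list_entry);
		if (mem)
			list_del(&mem->clear_list_entry);
		mutex_unlock(&mm->vidmem.clear_list_mutex);

		if (!mem)
			break;

		/* Assumed helpers: CE-based clear, then unmap/free, then
		 * release of the dynamically allocated mem_desc. */
		gk20a_gmmu_clear_vidmem_mem(g, mem);
		gk20a_gmmu_free(g, mem);
		kfree(mem);
	}
}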