author     Konsta Holtta <kholtta@nvidia.com>    2016-09-08 07:42:44 -0400
committer  mobile promotions <svcmobile_promotions@nvidia.com>    2016-09-14 16:03:44 -0400
commit     93d31990197897471e291d83361d9d57769cd6f2 (patch)
tree       ffea2b1cdcebfbdabb7b54a758c98a12c08fb2ec /drivers/gpu/nvgpu/gk20a/mm_gk20a.h
parent     54e22a2bae2ad7df5fae7a8af873ef3977436705 (diff)
gpu: nvgpu: test free user vidmem atomically
An empty list of soon-to-be-freed userspace vidmem buffers is not enough to safely decide whether a failed allocation could succeed if retried, because removing a buffer from the list and actually marking its memory freed is not atomic. Fix this by using an atomic counter for the number of pending frees (so that it is still safe to first remove a buffer from the job list and then perform the free), and by making each allocation attempt atomic with a test of that counter.

This still does not guarantee that memory is available (the pending amount in bytes plus the current free amount is not computed), but it removes the race that produced false negatives when a single program expected repeated frees and allocations to succeed.

Bug 1809939

Change-Id: I6a92da2e21cbf3f886b727000c924d56f35ce55b
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/1217078
(cherry picked from commit 83c1f1e70dccd92fdd4481132cf5b6717760d432)
Reviewed-on: http://git-master/r/1220545
Reviewed-by: Automatic_Commit_Validation_User
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
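The allocation-side scheme is easiest to see in code. The following is a minimal C sketch of what "allocation attempt atomic with a test of pending frees" could look like: clears_pending and clear_list_mutex come from this patch, but the allocator field, gk20a_alloc() call, and function name are illustrative assumptions, not a copy of the nvgpu sources.

/*
 * Sketch: try a vidmem allocation and, on failure, report whether any
 * frees are still in flight. Holding clear_list_mutex across both the
 * atomic_read() and the allocation closes the window where a buffer
 * has left the clear list but its memory is not yet marked free.
 */
static int vidmem_alloc_checked(struct mm_gk20a *mm, u64 size, u64 *addr)
{
        int before_pending;

        mutex_lock(&mm->vidmem.clear_list_mutex);
        before_pending = atomic_read(&mm->vidmem.clears_pending);
        *addr = gk20a_alloc(&mm->vidmem.allocator, size);
        mutex_unlock(&mm->vidmem.clear_list_mutex);

        if (!*addr) {
                /*
                 * No memory now, but frees are pending: the caller may
                 * retry (-EAGAIN) rather than fail hard (-ENOMEM).
                 */
                return before_pending ? -EAGAIN : -ENOMEM;
        }

        return 0;
}

As the commit message notes, a nonzero counter does not prove a retry will succeed (pending bytes are not summed against the shortfall); it only distinguishes "retry may help" from "definitely out of memory".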
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.h')
 drivers/gpu/nvgpu/gk20a/mm_gk20a.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
index 9002290e..8ce110a1 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h
@@ -420,8 +420,10 @@ struct mm_gk20a {
 
                 struct list_head clear_list_head;
                 struct mutex clear_list_mutex;
+
+                struct work_struct clear_mem_worker;
+                atomic_t clears_pending;
         } vidmem;
-        struct work_struct vidmem_clear_mem_worker;
 };
 
 int gk20a_mm_init(struct mm_gk20a *mm);
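For context, a sketch of the free side these new fields serve, showing why unlinking a buffer before freeing it stays safe: clears_pending is incremented when the buffer is queued and decremented only after the memory is actually released, so the counter never reads zero while a free is in flight. The buffer type, list-entry field, and clear-and-free helper below are illustrative assumptions, not the actual nvgpu implementation.

/* Producer side: queue a buffer for deferred clearing and freeing. */
static void vidmem_queue_free(struct mm_gk20a *mm, struct vidmem_buf *buf)
{
        mutex_lock(&mm->vidmem.clear_list_mutex);
        list_add_tail(&buf->clear_list_entry, &mm->vidmem.clear_list_head);
        atomic_inc(&mm->vidmem.clears_pending);
        mutex_unlock(&mm->vidmem.clear_list_mutex);

        schedule_work(&mm->vidmem.clear_mem_worker);
}

/* Pop one queued buffer, or NULL if the list is empty. */
static struct vidmem_buf *vidmem_dequeue_pending(struct mm_gk20a *mm)
{
        struct vidmem_buf *buf;

        mutex_lock(&mm->vidmem.clear_list_mutex);
        buf = list_first_entry_or_null(&mm->vidmem.clear_list_head,
                                       struct vidmem_buf, clear_list_entry);
        if (buf)
                list_del(&buf->clear_list_entry);
        mutex_unlock(&mm->vidmem.clear_list_mutex);

        return buf;
}

/* Worker side: the buffer leaves the list first, but clears_pending
 * stays nonzero until the memory is truly free, so allocators never
 * see "list empty" mistaken for "nothing pending". */
static void vidmem_clear_mem_worker(struct work_struct *work)
{
        struct mm_gk20a *mm =
                container_of(work, struct mm_gk20a, vidmem.clear_mem_worker);
        struct vidmem_buf *buf;

        while ((buf = vidmem_dequeue_pending(mm)) != NULL) {
                vidmem_clear_and_free(buf);             /* memory now free */
                atomic_dec(&mm->vidmem.clears_pending); /* only after free */
        }
}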