author:    Deepak Nibade <dnibade@nvidia.com>  2016-04-01 06:55:52 -0400
committer: Terje Bergstrom <tbergstrom@nvidia.com>  2016-04-19 11:17:30 -0400
commit:    2ac8c9729a5b7ca0b0bdc053e72d2b4658f8bed7 (patch)
tree:      36e0371f952b821bb6f117616a781667b56565c2 /drivers/gpu/nvgpu/gk20a/channel_gk20a.c
parent:    c8d17e9167dec4282c04bbb6581ea1de5fbf9ac2 (diff)
gpu: nvgpu: make jobs_lock more fine grained
While processing all the jobs in gk20a_channel_clean_up_jobs(), we
currently acquire jobs_lock, traverse the list, clean up the jobs, and
then release the lock. But in this case we might hold the lock for too
long, blocking the submit path.

Hence, make jobs_lock more fine-grained by restricting it to list
accesses only.

Bug 200187553

Change-Id: If82af8ff386f7bc29061cfd57fdda7df62f11c17
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1120412
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Tested-by: Terje Bergstrom <tbergstrom@nvidia.com>
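The restructured loop in the diff below amounts to a standard fine-grained
locking pattern: hold the lock only long enough to peek at or unlink a
list node, and run the expensive per-job cleanup with the lock dropped.
A minimal sketch of that pattern follows; the types and names here are
hypothetical stand-ins for illustration, not the nvgpu code itself.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* hypothetical job type; fences, mapped buffers, etc. would live here */
struct demo_job {
        struct list_head list;
};

static void demo_clean_up_jobs(struct list_head *jobs, struct mutex *lock)
{
        struct demo_job *job;

        while (1) {
                /* take the lock only long enough to peek at the head */
                mutex_lock(lock);
                if (list_empty(jobs)) {
                        mutex_unlock(lock);
                        break;
                }
                job = list_first_entry(jobs, struct demo_job, list);
                mutex_unlock(lock);

                /*
                 * Expensive per-job cleanup would run here with the lock
                 * dropped, so a concurrent submit path can append jobs.
                 */

                /* retake the lock just for the unlink */
                mutex_lock(lock);
                list_del_init(&job->list);
                mutex_unlock(lock);

                kfree(job);
        }
}

This is only safe with a single consumer: the job peeked at the head
stays on the list, owned by the worker, until the worker itself unlinks
it. That is the same invariant the patch relies on after it drops
jobs_lock between the peek and the list_del_init().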
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.c')
-rw-r--r--  drivers/gpu/nvgpu/gk20a/channel_gk20a.c  28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
index 1b9047c0..e6ecac14 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c
@@ -1862,23 +1862,33 @@ static void gk20a_channel_clean_up_jobs(struct work_struct *work)
         struct channel_gk20a *c = container_of(to_delayed_work(work),
                         struct channel_gk20a, clean_up.wq);
         struct vm_gk20a *vm;
-        struct channel_gk20a_job *job, *n;
+        struct channel_gk20a_job *job;
         struct gk20a_platform *platform;
+        struct gk20a *g;
 
         c = gk20a_channel_get(c);
         if (!c)
                 return;
 
         vm = c->vm;
-        platform = gk20a_get_platform(c->g->dev);
+        g = c->g;
+        platform = gk20a_get_platform(g->dev);
 
         gk20a_channel_cancel_job_clean_up(c, false);
 
-        mutex_lock(&c->jobs_lock);
-        list_for_each_entry_safe(job, n, &c->jobs, list) {
-                struct gk20a *g = c->g;
-
-                bool completed = gk20a_fence_is_expired(job->post_fence);
+        while (1) {
+                bool completed;
+
+                mutex_lock(&c->jobs_lock);
+                if (list_empty(&c->jobs)) {
+                        mutex_unlock(&c->jobs_lock);
+                        break;
+                }
+                job = list_first_entry(&c->jobs,
+                                struct channel_gk20a_job, list);
+                mutex_unlock(&c->jobs_lock);
+
+                completed = gk20a_fence_is_expired(job->post_fence);
                 if (!completed) {
                         gk20a_channel_timeout_start(c, job);
                         break;
@@ -1919,13 +1929,15 @@ static void gk20a_channel_clean_up_jobs(struct work_struct *work)
                  * so this wouldn't get freed here. */
                 gk20a_channel_put(c);
 
+                mutex_lock(&c->jobs_lock);
                 list_del_init(&job->list);
+                mutex_unlock(&c->jobs_lock);
+
                 kfree(job);
+
                 gk20a_idle(g->dev);
         }
 
-        mutex_unlock(&c->jobs_lock);
-
         if (c->update_fn)
                 schedule_work(&c->update_fn_work);
 
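For contrast, a hypothetical sketch of the contending producer (the
submit path is not shown in this diff; this is an assumed shape, reusing
the illustrative demo_job type from above). It needs the lock only for a
list append, so under the fine-grained scheme it no longer waits behind
an entire cleanup pass.

static void demo_submit_append(struct list_head *jobs, struct mutex *lock,
                               struct demo_job *job)
{
        /* brief critical section: append and release */
        mutex_lock(lock);
        list_add_tail(&job->list, jobs);
        mutex_unlock(lock);
}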