diff options
Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--  drivers/gpu/nvgpu/common/linux/driver_common.c |  2 +-
-rw-r--r--  drivers/gpu/nvgpu/common/linux/module.c        | 19 +++++++++++--------
-rw-r--r--  drivers/gpu/nvgpu/common/linux/os_linux.h      |  2 ++
3 files changed, 14 insertions, 9 deletions
diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c
index 5f2961f4..b82e3784 100644
--- a/drivers/gpu/nvgpu/common/linux/driver_common.c
+++ b/drivers/gpu/nvgpu/common/linux/driver_common.c
@@ -42,7 +42,7 @@ static void nvgpu_init_vars(struct gk20a *g)
42 | init_waitqueue_head(&l->sw_irq_nonstall_last_handled_wq); | 42 | init_waitqueue_head(&l->sw_irq_nonstall_last_handled_wq); |
43 | gk20a_init_gr(g); | 43 | gk20a_init_gr(g); |
44 | 44 | ||
45 | init_rwsem(&g->busy_lock); | 45 | init_rwsem(&l->busy_lock); |
46 | init_rwsem(&g->deterministic_busy); | 46 | init_rwsem(&g->deterministic_busy); |
47 | 47 | ||
48 | nvgpu_spinlock_init(&g->mc_enable_lock); | 48 | nvgpu_spinlock_init(&g->mc_enable_lock); |
diff --git a/drivers/gpu/nvgpu/common/linux/module.c b/drivers/gpu/nvgpu/common/linux/module.c
index c474f36a..dcff089b 100644
--- a/drivers/gpu/nvgpu/common/linux/module.c
+++ b/drivers/gpu/nvgpu/common/linux/module.c
@@ -63,6 +63,7 @@ void gk20a_busy_noresume(struct gk20a *g)
63 | 63 | ||
64 | int gk20a_busy(struct gk20a *g) | 64 | int gk20a_busy(struct gk20a *g) |
65 | { | 65 | { |
66 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
66 | int ret = 0; | 67 | int ret = 0; |
67 | struct device *dev; | 68 | struct device *dev; |
68 | 69 | ||
@@ -71,7 +72,7 @@ int gk20a_busy(struct gk20a *g)
71 | 72 | ||
72 | atomic_inc(&g->usage_count.atomic_var); | 73 | atomic_inc(&g->usage_count.atomic_var); |
73 | 74 | ||
74 | down_read(&g->busy_lock); | 75 | down_read(&l->busy_lock); |
75 | 76 | ||
76 | if (!gk20a_can_busy(g)) { | 77 | if (!gk20a_can_busy(g)) { |
77 | ret = -ENODEV; | 78 | ret = -ENODEV; |
@@ -107,7 +108,7 @@ int gk20a_busy(struct gk20a *g)
107 | } | 108 | } |
108 | 109 | ||
109 | fail: | 110 | fail: |
110 | up_read(&g->busy_lock); | 111 | up_read(&l->busy_lock); |
111 | 112 | ||
112 | return ret < 0 ? ret : 0; | 113 | return ret < 0 ? ret : 0; |
113 | } | 114 | } |
@@ -282,12 +283,13 @@ static struct of_device_id tegra_gk20a_of_match[] = {
282 | * | 283 | * |
283 | * In success, this call MUST be balanced by caller with __gk20a_do_unidle() | 284 | * In success, this call MUST be balanced by caller with __gk20a_do_unidle() |
284 | * | 285 | * |
285 | * Acquires two locks : &g->busy_lock and &platform->railgate_lock | 286 | * Acquires two locks : &l->busy_lock and &platform->railgate_lock |
286 | * In success, we hold these locks and return | 287 | * In success, we hold these locks and return |
287 | * In failure, we release these locks and return | 288 | * In failure, we release these locks and return |
288 | */ | 289 | */ |
289 | int __gk20a_do_idle(struct gk20a *g, bool force_reset) | 290 | int __gk20a_do_idle(struct gk20a *g, bool force_reset) |
290 | { | 291 | { |
292 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
291 | struct device *dev = dev_from_gk20a(g); | 293 | struct device *dev = dev_from_gk20a(g); |
292 | struct gk20a_platform *platform = dev_get_drvdata(dev); | 294 | struct gk20a_platform *platform = dev_get_drvdata(dev); |
293 | struct nvgpu_timeout timeout; | 295 | struct nvgpu_timeout timeout; |
@@ -303,7 +305,7 @@ int __gk20a_do_idle(struct gk20a *g, bool force_reset)
303 | gk20a_channel_deterministic_idle(g); | 305 | gk20a_channel_deterministic_idle(g); |
304 | 306 | ||
305 | /* acquire busy lock to block other busy() calls */ | 307 | /* acquire busy lock to block other busy() calls */ |
306 | down_write(&g->busy_lock); | 308 | down_write(&l->busy_lock); |
307 | 309 | ||
308 | /* acquire railgate lock to prevent unrailgate in midst of do_idle() */ | 310 | /* acquire railgate lock to prevent unrailgate in midst of do_idle() */ |
309 | nvgpu_mutex_acquire(&platform->railgate_lock); | 311 | nvgpu_mutex_acquire(&platform->railgate_lock); |
@@ -406,7 +408,7 @@ fail_drop_usage_count:
406 | pm_runtime_put_noidle(dev); | 408 | pm_runtime_put_noidle(dev); |
407 | fail_timeout: | 409 | fail_timeout: |
408 | nvgpu_mutex_release(&platform->railgate_lock); | 410 | nvgpu_mutex_release(&platform->railgate_lock); |
409 | up_write(&g->busy_lock); | 411 | up_write(&l->busy_lock); |
410 | gk20a_channel_deterministic_unidle(g); | 412 | gk20a_channel_deterministic_unidle(g); |
411 | return -EBUSY; | 413 | return -EBUSY; |
412 | } | 414 | } |
@@ -429,6 +431,7 @@ static int gk20a_do_idle(void *_g)
429 | */ | 431 | */ |
430 | int __gk20a_do_unidle(struct gk20a *g) | 432 | int __gk20a_do_unidle(struct gk20a *g) |
431 | { | 433 | { |
434 | struct nvgpu_os_linux *l = nvgpu_os_linux_from_gk20a(g); | ||
432 | struct device *dev = dev_from_gk20a(g); | 435 | struct device *dev = dev_from_gk20a(g); |
433 | struct gk20a_platform *platform = dev_get_drvdata(dev); | 436 | struct gk20a_platform *platform = dev_get_drvdata(dev); |
434 | int err; | 437 | int err; |
@@ -453,7 +456,7 @@ int __gk20a_do_unidle(struct gk20a *g)
453 | 456 | ||
454 | /* release the lock and open up all other busy() calls */ | 457 | /* release the lock and open up all other busy() calls */ |
455 | nvgpu_mutex_release(&platform->railgate_lock); | 458 | nvgpu_mutex_release(&platform->railgate_lock); |
456 | up_write(&g->busy_lock); | 459 | up_write(&l->busy_lock); |
457 | 460 | ||
458 | gk20a_channel_deterministic_unidle(g); | 461 | gk20a_channel_deterministic_unidle(g); |
459 | 462 | ||
@@ -887,12 +890,12 @@ void gk20a_driver_start_unload(struct gk20a *g)
887 | 890 | ||
888 | gk20a_dbg(gpu_dbg_shutdown, "Driver is now going down!\n"); | 891 | gk20a_dbg(gpu_dbg_shutdown, "Driver is now going down!\n"); |
889 | 892 | ||
890 | down_write(&g->busy_lock); | 893 | down_write(&l->busy_lock); |
891 | __nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true); | 894 | __nvgpu_set_enabled(g, NVGPU_DRIVER_IS_DYING, true); |
892 | /* GR SW ready needs to be invalidated at this time with the busy lock | 895 | /* GR SW ready needs to be invalidated at this time with the busy lock |
893 | * held to prevent a racing condition on the gr/mm code */ | 896 | * held to prevent a racing condition on the gr/mm code */ |
894 | g->gr.sw_ready = false; | 897 | g->gr.sw_ready = false; |
895 | up_write(&g->busy_lock); | 898 | up_write(&l->busy_lock); |
896 | 899 | ||
897 | if (g->is_virtual) | 900 | if (g->is_virtual) |
898 | return; | 901 | return; |
diff --git a/drivers/gpu/nvgpu/common/linux/os_linux.h b/drivers/gpu/nvgpu/common/linux/os_linux.h
index 160a5738..48479843 100644
--- a/drivers/gpu/nvgpu/common/linux/os_linux.h
+++ b/drivers/gpu/nvgpu/common/linux/os_linux.h
@@ -110,6 +110,8 @@ struct nvgpu_os_linux {
110 | struct dentry *debugfs_dump_ctxsw_stats; | 110 | struct dentry *debugfs_dump_ctxsw_stats; |
111 | #endif | 111 | #endif |
112 | struct gk20a_cde_app cde_app; | 112 | struct gk20a_cde_app cde_app; |
113 | |||
114 | struct rw_semaphore busy_lock; | ||
113 | }; | 115 | }; |
114 | 116 | ||
115 | static inline struct nvgpu_os_linux *nvgpu_os_linux_from_gk20a(struct gk20a *g) | 117 | static inline struct nvgpu_os_linux *nvgpu_os_linux_from_gk20a(struct gk20a *g) |