path: root/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
author    Deepak Nibade <dnibade@nvidia.com>    2017-01-24 08:30:42 -0500
committer mobile promotions <svcmobile_promotions@nvidia.com>    2017-02-22 07:15:02 -0500
commit    8ee3aa4b3175d8d27e57a0f5d5e2cdf3d78a4a58 (patch)
tree      505dfd2ea2aca2f1cbdb254baee980862d21e04d    /drivers/gpu/nvgpu/gk20a/channel_gk20a.h
parent    1f855af63fdd31fe3dcfee75f4f5f9b62f30d87e (diff)
gpu: nvgpu: use common nvgpu mutex/spinlock APIs
Instead of using the Linux APIs for mutexes and spinlocks directly, use the new APIs defined in <nvgpu/lock.h>.

Replace the Linux-specific mutex/spinlock declaration, init, lock, and unlock APIs with the new APIs, e.g. struct mutex is replaced by struct nvgpu_mutex and mutex_lock() is replaced by nvgpu_mutex_acquire().

Also include <nvgpu/lock.h> instead of <linux/mutex.h> and <linux/spinlock.h>.

Add explicit nvgpu/lock.h includes to the files below to fix compilation failures:
gk20a/platform_gk20a.h
include/nvgpu/allocator.h

Jira NVGPU-13

Change-Id: I81a05d21ecdbd90c2076a9f0aefd0e40b215bd33
Signed-off-by: Deepak Nibade <dnibade@nvidia.com>
Reviewed-on: http://git-master/r/1293187
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
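For illustration, a minimal before/after sketch of the conversion this commit performs at a call site. Only struct nvgpu_mutex and nvgpu_mutex_acquire() are named by the commit message; the nvgpu_mutex_init() and nvgpu_mutex_release() names used below are assumed counterparts from <nvgpu/lock.h>, not confirmed by this change.

/* Hypothetical call-site sketch of the mutex conversion. struct nvgpu_mutex
 * and nvgpu_mutex_acquire() are named in the commit message; nvgpu_mutex_init()
 * and nvgpu_mutex_release() are assumed counterparts from <nvgpu/lock.h>. */
#include <linux/types.h>

#include <nvgpu/lock.h>

struct demo_state {
	struct nvgpu_mutex lock;	/* was: struct mutex lock; */
	u32 value;
};

static void demo_state_init(struct demo_state *s)
{
	nvgpu_mutex_init(&s->lock);	/* was: mutex_init(&s->lock); */
}

static void demo_state_set(struct demo_state *s, u32 v)
{
	nvgpu_mutex_acquire(&s->lock);	/* was: mutex_lock(&s->lock); */
	s->value = v;
	nvgpu_mutex_release(&s->lock);	/* was: mutex_unlock(&s->lock); */
}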
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/channel_gk20a.h')
-rw-r--r--    drivers/gpu/nvgpu/gk20a/channel_gk20a.h    34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
index f940a271..14ee9f69 100644
--- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
+++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.h
@@ -19,15 +19,15 @@
 #define CHANNEL_GK20A_H
 
 #include <linux/log2.h>
-#include <linux/mutex.h>
 #include <linux/poll.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
-#include <linux/spinlock.h>
 #include <linux/stacktrace.h>
 #include <linux/wait.h>
 #include <uapi/linux/nvgpu.h>
 
+#include <nvgpu/lock.h>
+
 struct gk20a;
 struct gr_gk20a;
 struct dbg_session_gk20a;
@@ -80,18 +80,18 @@ struct channel_gk20a_joblist {
 		unsigned int put;
 		unsigned int get;
 		struct channel_gk20a_job *jobs;
-		struct mutex read_lock;
+		struct nvgpu_mutex read_lock;
 	} pre_alloc;
 
 	struct {
 		struct list_head jobs;
-		spinlock_t lock;
+		struct nvgpu_spinlock lock;
 	} dynamic;
 };
 
 struct channel_gk20a_timeout {
 	struct delayed_work wq;
-	raw_spinlock_t lock;
+	struct nvgpu_raw_spinlock lock;
 	bool initialized;
 	u32 gp_get;
 };
@@ -106,12 +106,12 @@ struct gk20a_event_id_data {
 	bool event_posted;
 
 	wait_queue_head_t event_id_wq;
-	struct mutex lock;
+	struct nvgpu_mutex lock;
 	struct list_head event_id_node;
 };
 
 struct channel_gk20a_clean_up {
-	struct mutex lock;
+	struct nvgpu_mutex lock;
 	bool scheduled;
 	struct delayed_work wq;
 };
@@ -156,7 +156,7 @@ struct channel_gk20a {
 
 	struct list_head free_chs;
 
-	spinlock_t ref_obtain_lock;
+	struct nvgpu_spinlock ref_obtain_lock;
 	bool referenceable;
 	atomic_t ref_count;
 	wait_queue_head_t ref_count_dec_wq;
@@ -169,7 +169,7 @@ struct channel_gk20a {
 	struct channel_gk20a_ref_action ref_actions[
 		GK20A_CHANNEL_REFCOUNT_TRACKING];
 	size_t ref_actions_put; /* index of next write */
-	spinlock_t ref_actions_lock;
+	struct nvgpu_spinlock ref_actions_lock;
 #endif
 
 	struct nvgpu_semaphore_int *hw_sema;
@@ -183,7 +183,7 @@ struct channel_gk20a {
 	bool cde;
 	pid_t pid;
 	pid_t tgid;
-	struct mutex ioctl_lock;
+	struct nvgpu_mutex ioctl_lock;
 
 	int tsgid;
 	struct list_head ch_entry; /* channel's entry in TSG */
@@ -221,17 +221,17 @@ struct channel_gk20a {
 		void *cyclestate_buffer;
 		u32 cyclestate_buffer_size;
 		struct dma_buf *cyclestate_buffer_handler;
-		struct mutex cyclestate_buffer_mutex;
+		struct nvgpu_mutex cyclestate_buffer_mutex;
 	} cyclestate;
 
-	struct mutex cs_client_mutex;
+	struct nvgpu_mutex cs_client_mutex;
 	struct gk20a_cs_snapshot_client *cs_client;
 #endif
-	struct mutex dbg_s_lock;
+	struct nvgpu_mutex dbg_s_lock;
 	struct list_head dbg_s_list;
 
 	struct list_head event_id_list;
-	struct mutex event_id_list_lock;
+	struct nvgpu_mutex event_id_list_lock;
 
 	bool has_timedout;
 	u32 timeout_ms_max;
@@ -241,9 +241,9 @@ struct channel_gk20a {
 	struct dma_buf *error_notifier_ref;
 	struct nvgpu_notification *error_notifier;
 	void *error_notifier_va;
-	struct mutex error_notifier_mutex;
+	struct nvgpu_mutex error_notifier_mutex;
 
-	struct mutex sync_lock;
+	struct nvgpu_mutex sync_lock;
 	struct gk20a_channel_sync *sync;
 
 #ifdef CONFIG_TEGRA_GR_VIRTUALIZATION
@@ -254,7 +254,7 @@ struct channel_gk20a {
 	 * via schedule_work */
 	void (*update_fn)(struct channel_gk20a *, void *);
 	void *update_fn_data;
-	spinlock_t update_fn_lock; /* make access to the two above atomic */
+	struct nvgpu_spinlock update_fn_lock; /* make access to the two above atomic */
 	struct work_struct update_fn_work;
 
 	u32 interleave_level;
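As a usage note, a hedged sketch of how one of the converted fields above (ref_obtain_lock, now a struct nvgpu_spinlock) might be taken at a call site. The nvgpu_spinlock_acquire()/nvgpu_spinlock_release() names are assumed to follow the same naming pattern as nvgpu_mutex_acquire(); the helper below is hypothetical and not part of this change.

/* Hypothetical helper showing the converted ref_obtain_lock in use.
 * nvgpu_spinlock_acquire()/nvgpu_spinlock_release() are assumed to mirror
 * the nvgpu_mutex_acquire() naming; they are not named in this diff. */
#include <linux/atomic.h>

#include <nvgpu/lock.h>

#include "channel_gk20a.h"

static bool demo_channel_try_get(struct channel_gk20a *ch)
{
	bool got_ref = false;

	nvgpu_spinlock_acquire(&ch->ref_obtain_lock);
	if (ch->referenceable) {
		atomic_inc(&ch->ref_count);	/* ref_count is still atomic_t */
		got_ref = true;
	}
	nvgpu_spinlock_release(&ch->ref_obtain_lock);

	return got_ref;
}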