path: root/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
Diffstat (limited to 'drivers/gpu/nvgpu/include/nvgpu/semaphore.h')
-rw-r--r--  drivers/gpu/nvgpu/include/nvgpu/semaphore.h | 86
1 file changed, 43 insertions(+), 43 deletions(-)
diff --git a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
index 07a27584..cc4921d3 100644
--- a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
+++ b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h
@@ -37,19 +37,19 @@
 #define SEMAPHORE_SIZE 16
 #define SEMAPHORE_SEA_GROWTH_RATE 32
 
-struct gk20a_semaphore_sea;
+struct nvgpu_semaphore_sea;
 
 /*
  * Underlying semaphore data structure. This semaphore can be shared amongst
  * other semaphore instances.
  */
-struct gk20a_semaphore_int {
+struct nvgpu_semaphore_int {
 	int idx;			/* Semaphore index. */
 	u32 offset;			/* Offset into the pool. */
 	atomic_t next_value;		/* Next available value. */
 	u32 *value;			/* Current value (access w/ readl()). */
 	u32 nr_incrs;			/* Number of increments programmed. */
-	struct gk20a_semaphore_pool *p;	/* Pool that owns this sema. */
+	struct nvgpu_semaphore_pool *p;	/* Pool that owns this sema. */
 	struct channel_gk20a *ch;	/* Channel that owns this sema. */
 	struct list_head hw_sema_list;	/* List of HW semaphores. */
 };
@@ -59,8 +59,8 @@ struct gk20a_semaphore_int {
  * pointer to a real semaphore and a value to wait for. This allows one physical
  * semaphore to be shared among an essentially infinite number of submits.
  */
-struct gk20a_semaphore {
-	struct gk20a_semaphore_int *hw_sema;
+struct nvgpu_semaphore {
+	struct nvgpu_semaphore_int *hw_sema;
 
 	atomic_t value;
 	int incremented;
@@ -71,7 +71,7 @@ struct gk20a_semaphore {
 /*
  * A semaphore pool. Each address space will own exactly one of these.
  */
-struct gk20a_semaphore_pool {
+struct nvgpu_semaphore_pool {
 	struct page *page;			/* This pool's page of memory */
 	struct list_head pool_list_entry;	/* Node for list of pools. */
 	void *cpu_va;				/* CPU access to the pool. */
@@ -82,7 +82,7 @@ struct gk20a_semaphore_pool {
 	struct list_head hw_semas;		/* List of HW semas. */
 	DECLARE_BITMAP(semas_alloced, PAGE_SIZE / SEMAPHORE_SIZE);
 
-	struct gk20a_semaphore_sea *sema_sea;	/* Sea that owns this pool. */
+	struct nvgpu_semaphore_sea *sema_sea;	/* Sea that owns this pool. */
 
 	struct mutex pool_lock;
 
@@ -114,7 +114,7 @@ struct gk20a_semaphore_pool {
  * pool. Channels then allocate regular semaphores - basically just a value that
  * signifies when a particular job is done.
  */
-struct gk20a_semaphore_sea {
+struct nvgpu_semaphore_sea {
 	struct list_head pool_list;		/* List of pools in this sea. */
 	struct gk20a *gk20a;
 
@@ -149,33 +149,33 @@ struct gk20a_semaphore_sea {
 /*
  * Semaphore sea functions.
  */
-struct gk20a_semaphore_sea *gk20a_semaphore_sea_create(struct gk20a *gk20a);
-int gk20a_semaphore_sea_map(struct gk20a_semaphore_pool *sea,
+struct nvgpu_semaphore_sea *nvgpu_semaphore_sea_create(struct gk20a *gk20a);
+int nvgpu_semaphore_sea_map(struct nvgpu_semaphore_pool *sea,
 			struct vm_gk20a *vm);
-void gk20a_semaphore_sea_unmap(struct gk20a_semaphore_pool *sea,
+void nvgpu_semaphore_sea_unmap(struct nvgpu_semaphore_pool *sea,
 			struct vm_gk20a *vm);
-struct gk20a_semaphore_sea *gk20a_semaphore_get_sea(struct gk20a *g);
+struct nvgpu_semaphore_sea *nvgpu_semaphore_get_sea(struct gk20a *g);
 
 /*
  * Semaphore pool functions.
  */
-struct gk20a_semaphore_pool *gk20a_semaphore_pool_alloc(
-		struct gk20a_semaphore_sea *sea);
-int gk20a_semaphore_pool_map(struct gk20a_semaphore_pool *pool,
+struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc(
+		struct nvgpu_semaphore_sea *sea);
+int nvgpu_semaphore_pool_map(struct nvgpu_semaphore_pool *pool,
 			struct vm_gk20a *vm);
-void gk20a_semaphore_pool_unmap(struct gk20a_semaphore_pool *pool,
+void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *pool,
 			struct vm_gk20a *vm);
-u64 __gk20a_semaphore_pool_gpu_va(struct gk20a_semaphore_pool *p, bool global);
-void gk20a_semaphore_pool_get(struct gk20a_semaphore_pool *p);
-void gk20a_semaphore_pool_put(struct gk20a_semaphore_pool *p);
+u64 __nvgpu_semaphore_pool_gpu_va(struct nvgpu_semaphore_pool *p, bool global);
+void nvgpu_semaphore_pool_get(struct nvgpu_semaphore_pool *p);
+void nvgpu_semaphore_pool_put(struct nvgpu_semaphore_pool *p);
 
 /*
  * Semaphore functions.
  */
-struct gk20a_semaphore *gk20a_semaphore_alloc(struct channel_gk20a *ch);
-void gk20a_semaphore_put(struct gk20a_semaphore *s);
-void gk20a_semaphore_get(struct gk20a_semaphore *s);
-void gk20a_semaphore_free_hw_sema(struct channel_gk20a *ch);
+struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch);
+void nvgpu_semaphore_put(struct nvgpu_semaphore *s);
+void nvgpu_semaphore_get(struct nvgpu_semaphore *s);
+void nvgpu_semaphore_free_hw_sema(struct channel_gk20a *ch);
 
 /*
  * Return the address of a specific semaphore.
@@ -183,9 +183,9 @@ void gk20a_semaphore_free_hw_sema(struct channel_gk20a *ch);
  * Don't call this on a semaphore you don't own - the VA returned will make no
  * sense in your specific channel's VM.
  */
-static inline u64 gk20a_semaphore_gpu_rw_va(struct gk20a_semaphore *s)
+static inline u64 nvgpu_semaphore_gpu_rw_va(struct nvgpu_semaphore *s)
 {
-	return __gk20a_semaphore_pool_gpu_va(s->hw_sema->p, false) +
+	return __nvgpu_semaphore_pool_gpu_va(s->hw_sema->p, false) +
 		s->hw_sema->offset;
 }
 
@@ -193,22 +193,22 @@ static inline u64 gk20a_semaphore_gpu_rw_va(struct gk20a_semaphore *s)
  * Get the global RO address for the semaphore. Can be called on any semaphore
  * regardless of whether you own it.
  */
-static inline u64 gk20a_semaphore_gpu_ro_va(struct gk20a_semaphore *s)
+static inline u64 nvgpu_semaphore_gpu_ro_va(struct nvgpu_semaphore *s)
 {
-	return __gk20a_semaphore_pool_gpu_va(s->hw_sema->p, true) +
+	return __nvgpu_semaphore_pool_gpu_va(s->hw_sema->p, true) +
 		s->hw_sema->offset;
 }
 
-static inline u64 gk20a_hw_sema_addr(struct gk20a_semaphore_int *hw_sema)
+static inline u64 nvgpu_hw_sema_addr(struct nvgpu_semaphore_int *hw_sema)
 {
-	return __gk20a_semaphore_pool_gpu_va(hw_sema->p, true) +
+	return __nvgpu_semaphore_pool_gpu_va(hw_sema->p, true) +
 		hw_sema->offset;
 }
 
 /*
  * TODO: handle wrap around... Hmm, how to do this?
  */
-static inline bool gk20a_semaphore_is_released(struct gk20a_semaphore *s)
+static inline bool nvgpu_semaphore_is_released(struct nvgpu_semaphore *s)
 {
 	u32 sema_val = readl(s->hw_sema->value);
 
@@ -220,25 +220,25 @@ static inline bool gk20a_semaphore_is_released(struct gk20a_semaphore *s)
 	return (int)sema_val >= atomic_read(&s->value);
 }
 
-static inline bool gk20a_semaphore_is_acquired(struct gk20a_semaphore *s)
+static inline bool nvgpu_semaphore_is_acquired(struct nvgpu_semaphore *s)
 {
-	return !gk20a_semaphore_is_released(s);
+	return !nvgpu_semaphore_is_released(s);
 }
 
 /*
  * Read the underlying value from a semaphore.
  */
-static inline u32 gk20a_semaphore_read(struct gk20a_semaphore *s)
+static inline u32 nvgpu_semaphore_read(struct nvgpu_semaphore *s)
 {
 	return readl(s->hw_sema->value);
 }
 
-static inline u32 gk20a_semaphore_get_value(struct gk20a_semaphore *s)
+static inline u32 nvgpu_semaphore_get_value(struct nvgpu_semaphore *s)
 {
 	return (u32)atomic_read(&s->value);
 }
 
-static inline u32 gk20a_semaphore_next_value(struct gk20a_semaphore *s)
+static inline u32 nvgpu_semaphore_next_value(struct nvgpu_semaphore *s)
 {
 	return (u32)atomic_read(&s->hw_sema->next_value);
 }
@@ -247,11 +247,11 @@ static inline u32 gk20a_semaphore_next_value(struct gk20a_semaphore *s)
  * If @force is set then this will not wait for the underlying semaphore to
  * catch up to the passed semaphore.
  */
-static inline void __gk20a_semaphore_release(struct gk20a_semaphore *s,
+static inline void __nvgpu_semaphore_release(struct nvgpu_semaphore *s,
 					     bool force)
 {
 	u32 current_val;
-	u32 val = gk20a_semaphore_get_value(s);
+	u32 val = nvgpu_semaphore_get_value(s);
 	int attempts = 0;
 
 	/*
@@ -260,7 +260,7 @@ static inline void __gk20a_semaphore_release(struct gk20a_semaphore *s,
 	 *
 	 * TODO: tune the wait a little better.
 	 */
-	while ((current_val = gk20a_semaphore_read(s)) < (val - 1)) {
+	while ((current_val = nvgpu_semaphore_read(s)) < (val - 1)) {
 		if (force)
 			break;
 		msleep(100);
@@ -284,21 +284,21 @@ static inline void __gk20a_semaphore_release(struct gk20a_semaphore *s,
 			     s->hw_sema->ch->hw_chid, val);
 }
 
-static inline void gk20a_semaphore_release(struct gk20a_semaphore *s)
+static inline void nvgpu_semaphore_release(struct nvgpu_semaphore *s)
 {
-	__gk20a_semaphore_release(s, false);
+	__nvgpu_semaphore_release(s, false);
 }
 
 /*
  * Configure a software based increment on this semaphore. This is useful for
  * when we want the GPU to wait on a SW event before processing a channel.
  * Another way to describe this is when the GPU needs to wait on a SW pre-fence.
- * The pre-fence signals SW which in turn calls gk20a_semaphore_release() which
+ * The pre-fence signals SW which in turn calls nvgpu_semaphore_release() which
  * then allows the GPU to continue.
  *
  * Also used to prep a semaphore for an INCR by the GPU.
  */
-static inline void gk20a_semaphore_incr(struct gk20a_semaphore *s)
+static inline void nvgpu_semaphore_incr(struct nvgpu_semaphore *s)
 {
 	BUG_ON(s->incremented);
 
@@ -307,6 +307,6 @@ static inline void gk20a_semaphore_incr(struct gk20a_semaphore *s)
 
 	gpu_sema_verbose_dbg("INCR sema for c=%d (%u)",
 			     s->hw_sema->ch->hw_chid,
-			     gk20a_semaphore_next_value(s));
+			     nvgpu_semaphore_next_value(s));
 }
 #endif
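
Illustration only (not part of the patch above): a minimal caller sketch showing how the renamed API declared in this header fits together. The function name example_sw_prefence, the include path <nvgpu/semaphore.h>, and the commented-out submit step are assumptions for the example; only declarations visible in this header are used.

/* Hypothetical sketch; assumes the nvgpu include directory is on the include path. */
#include <linux/errno.h>
#include <nvgpu/semaphore.h>

static int example_sw_prefence(struct channel_gk20a *ch)
{
	struct nvgpu_semaphore *s;

	/* Allocate a semaphore on this channel; paired with the put() below. */
	s = nvgpu_semaphore_alloc(ch);
	if (!s)
		return -ENOMEM;

	/* Program one SW increment (BUG_ON fires if called twice on one sema). */
	nvgpu_semaphore_incr(s);

	/*
	 * ... submit GPU work that waits on this semaphore, e.g. via the
	 * address from nvgpu_semaphore_gpu_ro_va(s) ...
	 */

	/* The SW pre-fence has signaled: release so the GPU may continue. */
	nvgpu_semaphore_release(s);

	/* Drop our reference. */
	nvgpu_semaphore_put(s);
	return 0;
}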