author    Debarshi Dutta <ddutta@nvidia.com>    2017-08-03 06:04:44 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>    2017-08-17 17:26:47 -0400
commit    98186ec2c2127c2af65a34f9e697e04f518a79ab (patch)
tree      08ad87f3bf8c739e96b36f01728a8f7a30749a0e /drivers/gpu/nvgpu/clk/clk_arb.c
parent    49dc335cfe588179cbb42d8bab53bc76ba88b28f (diff)
gpu: nvgpu: Add wrapper over atomic_t and atomic64_t
- added wrapper structs nvgpu_atomic_t and nvgpu_atomic64_t over atomic_t
  and atomic64_t
- added nvgpu_atomic_* and nvgpu_atomic64_* APIs to access the above
  wrappers.

JIRA NVGPU-121

Change-Id: I61667bb0a84c2fc475365abb79bffb42b8b4786a
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1533044
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
GVS: Gerrit_Virtual_Submit
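The wrapper definitions themselves live outside this file; the diff below only converts the call sites. For orientation, a minimal sketch of what the Linux backing of these wrappers plausibly looks like -- the field name and exact signatures here are assumptions for illustration, not taken from this patch:

    /* Sketch only: nvgpu_atomic_t wraps the Linux atomic_t so that
     * common nvgpu code never touches a Linux type directly. */
    typedef struct nvgpu_atomic {
            atomic_t atomic_var;
    } nvgpu_atomic_t;

    typedef struct nvgpu_atomic64 {
            atomic64_t atomic_var;
    } nvgpu_atomic64_t;

    static inline void nvgpu_atomic_set(nvgpu_atomic_t *v, int i)
    {
            atomic_set(&v->atomic_var, i);
    }

    static inline int nvgpu_atomic_read(nvgpu_atomic_t *v)
    {
            return atomic_read(&v->atomic_var);
    }

    static inline int nvgpu_atomic_xchg(nvgpu_atomic_t *v, int new)
    {
            return atomic_xchg(&v->atomic_var, new);
    }

    static inline long nvgpu_atomic64_cmpxchg(nvgpu_atomic64_t *v,
                    long old, long new)
    {
            return atomic64_cmpxchg(&v->atomic_var, old, new);
    }

With wrappers of this shape, call sites swap atomic_* for nvgpu_atomic_* one-for-one, which is exactly what the 40 insertions and 40 deletions below amount to.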
Diffstat (limited to 'drivers/gpu/nvgpu/clk/clk_arb.c')
 drivers/gpu/nvgpu/clk/clk_arb.c | 80 ++++++++++++++++----------------
 1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/gpu/nvgpu/clk/clk_arb.c b/drivers/gpu/nvgpu/clk/clk_arb.c
index 3caa5409..b00ecd31 100644
--- a/drivers/gpu/nvgpu/clk/clk_arb.c
+++ b/drivers/gpu/nvgpu/clk/clk_arb.c
@@ -108,8 +108,8 @@ struct nvgpu_clk_notification {
 
 struct nvgpu_clk_notification_queue {
         u32 size;
-        atomic_t head;
-        atomic_t tail;
+        nvgpu_atomic_t head;
+        nvgpu_atomic_t tail;
         struct nvgpu_clk_notification *notifications;
 };
 
@@ -183,13 +183,13 @@ struct nvgpu_clk_arb {
183 u32 vf_table_index; 183 u32 vf_table_index;
184 184
185 u16 *mclk_f_points; 185 u16 *mclk_f_points;
186 atomic_t req_nr; 186 nvgpu_atomic_t req_nr;
187 187
188 u32 mclk_f_numpoints; 188 u32 mclk_f_numpoints;
189 u16 *gpc2clk_f_points; 189 u16 *gpc2clk_f_points;
190 u32 gpc2clk_f_numpoints; 190 u32 gpc2clk_f_numpoints;
191 191
192 atomic64_t alarm_mask; 192 nvgpu_atomic64_t alarm_mask;
193 struct nvgpu_clk_notification_queue notification_queue; 193 struct nvgpu_clk_notification_queue notification_queue;
194 194
195#ifdef CONFIG_DEBUG_FS 195#ifdef CONFIG_DEBUG_FS
@@ -206,11 +206,11 @@ struct nvgpu_clk_dev {
                 struct llist_node node;
         };
         wait_queue_head_t readout_wq;
-        atomic_t poll_mask;
+        nvgpu_atomic_t poll_mask;
         u16 gpc2clk_target_mhz;
         u16 mclk_target_mhz;
         u32 alarms_reported;
-        atomic_t enabled_mask;
+        nvgpu_atomic_t enabled_mask;
         struct nvgpu_clk_notification_queue queue;
         u32 arb_queue_head;
         struct kref refcount;
@@ -253,8 +253,8 @@ static int nvgpu_clk_notification_queue_alloc(struct gk20a *g,
                 return -ENOMEM;
         queue->size = events_number;
 
-        atomic_set(&queue->head, 0);
-        atomic_set(&queue->tail, 0);
+        nvgpu_atomic_set(&queue->head, 0);
+        nvgpu_atomic_set(&queue->tail, 0);
 
         return 0;
 }
@@ -263,8 +263,8 @@ static void nvgpu_clk_notification_queue_free(struct gk20a *g,
                 struct nvgpu_clk_notification_queue *queue) {
         nvgpu_kfree(g, queue->notifications);
         queue->size = 0;
-        atomic_set(&queue->head, 0);
-        atomic_set(&queue->tail, 0);
+        nvgpu_atomic_set(&queue->head, 0);
+        nvgpu_atomic_set(&queue->tail, 0);
 }
 
 int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
@@ -346,9 +346,9 @@ int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
 
         arb->actual = &arb->actual_pool[0];
 
-        atomic_set(&arb->req_nr, 0);
+        nvgpu_atomic_set(&arb->req_nr, 0);
 
-        atomic64_set(&arb->alarm_mask, 0);
+        nvgpu_atomic64_set(&arb->alarm_mask, 0);
         err = nvgpu_clk_notification_queue_alloc(g, &arb->notification_queue,
                 DEFAULT_EVENT_NUMBER);
         if (err < 0)
@@ -388,8 +388,8 @@ int nvgpu_clk_arb_init_arbiter(struct gk20a *g)
                 /* Check that first run is completed */
                 smp_mb();
                 wait_event_interruptible(arb->request_wq,
-                        atomic_read(&arb->req_nr));
-        } while (!atomic_read(&arb->req_nr));
+                        nvgpu_atomic_read(&arb->req_nr));
+        } while (!nvgpu_atomic_read(&arb->req_nr));
 
 
         return arb->status;
@@ -430,7 +430,7 @@ static void nvgpu_clk_arb_clear_global_alarm(struct gk20a *g, u32 alarm)
         u64 new_mask;
 
         do {
-                current_mask = atomic64_read(&arb->alarm_mask);
+                current_mask = nvgpu_atomic64_read(&arb->alarm_mask);
                 /* atomic operations are strong so they do not need masks */
 
                 refcnt = ((u32) (current_mask >> 32)) + 1;
@@ -438,7 +438,7 @@ static void nvgpu_clk_arb_clear_global_alarm(struct gk20a *g, u32 alarm)
                 new_mask = ((u64) refcnt << 32) | alarm_mask;
 
         } while (unlikely(current_mask !=
-                        (u64)atomic64_cmpxchg(&arb->alarm_mask,
+                        (u64)nvgpu_atomic64_cmpxchg(&arb->alarm_mask,
                                                 current_mask, new_mask)));
 }
 
@@ -452,7 +452,7 @@ static void nvgpu_clk_arb_set_global_alarm(struct gk20a *g, u32 alarm)
         u64 new_mask;
 
         do {
-                current_mask = atomic64_read(&arb->alarm_mask);
+                current_mask = nvgpu_atomic64_read(&arb->alarm_mask);
                 /* atomic operations are strong so they do not need masks */
 
                 refcnt = ((u32) (current_mask >> 32)) + 1;
@@ -460,7 +460,7 @@ static void nvgpu_clk_arb_set_global_alarm(struct gk20a *g, u32 alarm)
                 new_mask = ((u64) refcnt << 32) | alarm_mask;
 
         } while (unlikely(current_mask !=
-                        (u64)atomic64_cmpxchg(&arb->alarm_mask,
+                        (u64)nvgpu_atomic64_cmpxchg(&arb->alarm_mask,
                                                 current_mask, new_mask)));
 
         nvgpu_clk_arb_queue_notification(g, &arb->notification_queue, alarm);
@@ -537,7 +537,7 @@ static int nvgpu_clk_arb_install_fd(struct gk20a *g,
 
         init_waitqueue_head(&dev->readout_wq);
 
-        atomic_set(&dev->poll_mask, 0);
+        nvgpu_atomic_set(&dev->poll_mask, 0);
 
         dev->session = session;
         kref_init(&dev->refcount);
@@ -657,11 +657,11 @@ int nvgpu_clk_arb_install_event_fd(struct gk20a *g,
          * updated
          */
         if (alarm_mask)
-                atomic_set(&dev->enabled_mask, alarm_mask);
+                nvgpu_atomic_set(&dev->enabled_mask, alarm_mask);
         else
-                atomic_set(&dev->enabled_mask, EVENT(VF_UPDATE));
+                nvgpu_atomic_set(&dev->enabled_mask, EVENT(VF_UPDATE));
 
-        dev->arb_queue_head = atomic_read(&arb->notification_queue.head);
+        dev->arb_queue_head = nvgpu_atomic_read(&arb->notification_queue.head);
 
         nvgpu_spinlock_acquire(&arb->users_lock);
         list_add_tail_rcu(&dev->link, &arb->users);
@@ -1056,7 +1056,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
         gk20a_dbg_fn("");
 
         /* bail out if gpu is down */
-        if (atomic_read(&arb->alarm_mask) & EVENT(ALARM_GPU_LOST))
+        if (nvgpu_atomic64_read(&arb->alarm_mask) & EVENT(ALARM_GPU_LOST))
                 goto exit_arb;
 
 #ifdef CONFIG_DEBUG_FS
@@ -1247,7 +1247,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work)
 
         /* status must be visible before atomic inc */
         smp_wmb();
-        atomic_inc(&arb->req_nr);
+        nvgpu_atomic_inc(&arb->req_nr);
 
         /* Unlock pstate change for PG */
         nvgpu_mutex_release(&arb->pstate_lock);
@@ -1298,17 +1298,17 @@ exit_arb:
                                 EVENT(ALARM_CLOCK_ARBITER_FAILED));
         }
 
-        current_alarm = (u32) atomic64_read(&arb->alarm_mask);
+        current_alarm = (u32) nvgpu_atomic64_read(&arb->alarm_mask);
         /* notify completion for all requests */
         head = llist_del_all(&arb->requests);
         llist_for_each_entry_safe(dev, tmp, head, node) {
-                atomic_set(&dev->poll_mask, POLLIN | POLLRDNORM);
+                nvgpu_atomic_set(&dev->poll_mask, POLLIN | POLLRDNORM);
                 wake_up_interruptible(&dev->readout_wq);
                 kref_put(&dev->refcount, nvgpu_clk_arb_free_fd);
         }
 
-        atomic_set(&arb->notification_queue.head,
-                atomic_read(&arb->notification_queue.tail));
+        nvgpu_atomic_set(&arb->notification_queue.head,
+                nvgpu_atomic_read(&arb->notification_queue.tail));
         /* notify event for all users */
         rcu_read_lock();
         list_for_each_entry_rcu(dev, &arb->users, link) {
@@ -1329,7 +1329,7 @@ static void nvgpu_clk_arb_queue_notification(struct gk20a *g,
         u32 queue_index;
         u64 timestamp;
 
-        queue_index = (atomic_inc_return(&queue->tail)) % queue->size;
+        queue_index = (nvgpu_atomic_inc_return(&queue->tail)) % queue->size;
         /* get current timestamp */
         timestamp = (u64) sched_clock();
 
@@ -1355,14 +1355,14 @@ static u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
         size_t size;
         int index;
 
-        enabled_mask = atomic_read(&dev->enabled_mask);
+        enabled_mask = nvgpu_atomic_read(&dev->enabled_mask);
         size = arb->notification_queue.size;
 
         /* queue global arbiter notifications in buffer */
         do {
-                tail = atomic_read(&arb->notification_queue.tail);
+                tail = nvgpu_atomic_read(&arb->notification_queue.tail);
                 /* copy items to the queue */
-                queue_index = atomic_read(&dev->queue.tail);
+                queue_index = nvgpu_atomic_read(&dev->queue.tail);
                 head = dev->arb_queue_head;
                 head = (tail - head) < arb->notification_queue.size ?
                         head : tail - arb->notification_queue.size;
@@ -1389,10 +1389,10 @@ static u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
 
                         queue_alarm_mask |= alarm_detected;
                 }
-        } while (unlikely(atomic_read(&arb->notification_queue.tail) !=
+        } while (unlikely(nvgpu_atomic_read(&arb->notification_queue.tail) !=
                         (int)tail));
 
-        atomic_set(&dev->queue.tail, queue_index);
+        nvgpu_atomic_set(&dev->queue.tail, queue_index);
         /* update the last notification we processed from global queue */
 
         dev->arb_queue_head = tail;
@@ -1429,7 +1429,7 @@ static u32 nvgpu_clk_arb_notify(struct nvgpu_clk_dev *dev,
         }
 
         if (poll_mask) {
-                atomic_set(&dev->poll_mask, poll_mask);
+                nvgpu_atomic_set(&dev->poll_mask, poll_mask);
                 wake_up_interruptible_all(&dev->readout_wq);
         }
 
@@ -1454,7 +1454,7 @@ static int nvgpu_clk_arb_set_event_filter(struct nvgpu_clk_dev *dev,
                 return -EFAULT;
 
         /* update alarm mask */
-        atomic_set(&dev->enabled_mask, mask);
+        nvgpu_atomic_set(&dev->enabled_mask, mask);
 
         return 0;
 }
@@ -1539,8 +1539,8 @@ static inline u32 __pending_event(struct nvgpu_clk_dev *dev,
         u32 events = 0;
         struct nvgpu_clk_notification *p_notif;
 
-        tail = atomic_read(&dev->queue.tail);
-        head = atomic_read(&dev->queue.head);
+        tail = nvgpu_atomic_read(&dev->queue.tail);
+        head = nvgpu_atomic_read(&dev->queue.head);
 
         head = (tail - head) < dev->queue.size ? head : tail - dev->queue.size;
 
@@ -1550,7 +1550,7 @@ static inline u32 __pending_event(struct nvgpu_clk_dev *dev,
                 events |= p_notif->notification;
                 info->event_id = ffs(events) - 1;
                 info->timestamp = p_notif->timestamp;
-                atomic_set(&dev->queue.head, head);
+                nvgpu_atomic_set(&dev->queue.head, head);
         }
 
         return events;
@@ -1594,7 +1594,7 @@ static unsigned int nvgpu_clk_arb_poll_dev(struct file *filp, poll_table *wait)
         gk20a_dbg_fn("");
 
         poll_wait(filp, &dev->readout_wq, wait);
-        return atomic_xchg(&dev->poll_mask, 0);
+        return nvgpu_atomic_xchg(&dev->poll_mask, 0);
 }
 
 static int nvgpu_clk_arb_release_completion_dev(struct inode *inode,