diff options
author | Debarshi Dutta <ddutta@nvidia.com> | 2017-08-08 02:38:03 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2017-08-24 04:10:37 -0400 |
commit | 3fa47b877db1edc16018d662e7b9915d92354745 (patch) | |
tree | c1d9a8734e7d92b5ae647fbc3f582a01207a23f6 | |
parent | 8662fae334f2419da2e7fd220f7734217ec52433 (diff) |
gpu: nvgpu: Replace kref for refcounting in nvgpu
- added wrapper struct nvgpu_ref over nvgpu_atomic_t
- added nvgpu_ref_* APIs to access the above struct
JIRA NVGPU-140
Change-Id: Id47f897995dd4721751f7610b6d4d4fbfe4d6b9a
Signed-off-by: Debarshi Dutta <ddutta@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1540899
Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com>
Reviewed-by: svccoveritychecker <svccoveritychecker@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-by: Vijayakumar Subbu <vsubbu@nvidia.com>
25 files changed, 176 insertions, 98 deletions
diff --git a/drivers/gpu/nvgpu/clk/clk_arb.c b/drivers/gpu/nvgpu/clk/clk_arb.c index f1de54c6..fc6d403e 100644 --- a/drivers/gpu/nvgpu/clk/clk_arb.c +++ b/drivers/gpu/nvgpu/clk/clk_arb.c | |||
@@ -61,8 +61,8 @@ static long nvgpu_clk_arb_ioctl_event_dev(struct file *filp, unsigned int cmd, | |||
61 | static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work); | 61 | static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work); |
62 | static void nvgpu_clk_arb_run_vf_table_cb(struct work_struct *work); | 62 | static void nvgpu_clk_arb_run_vf_table_cb(struct work_struct *work); |
63 | static int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb); | 63 | static int nvgpu_clk_arb_update_vf_table(struct nvgpu_clk_arb *arb); |
64 | static void nvgpu_clk_arb_free_fd(struct kref *refcount); | 64 | static void nvgpu_clk_arb_free_fd(struct nvgpu_ref *refcount); |
65 | static void nvgpu_clk_arb_free_session(struct kref *refcount); | 65 | static void nvgpu_clk_arb_free_session(struct nvgpu_ref *refcount); |
66 | static int nvgpu_clk_arb_change_vf_point(struct gk20a *g, u16 gpc2clk_target, | 66 | static int nvgpu_clk_arb_change_vf_point(struct gk20a *g, u16 gpc2clk_target, |
67 | u16 sys2clk_target, u16 xbar2clk_target, u16 mclk_target, u32 voltuv, | 67 | u16 sys2clk_target, u16 xbar2clk_target, u16 mclk_target, u32 voltuv, |
68 | u32 voltuv_sram); | 68 | u32 voltuv_sram); |
@@ -214,13 +214,13 @@ struct nvgpu_clk_dev { | |||
214 | nvgpu_atomic_t enabled_mask; | 214 | nvgpu_atomic_t enabled_mask; |
215 | struct nvgpu_clk_notification_queue queue; | 215 | struct nvgpu_clk_notification_queue queue; |
216 | u32 arb_queue_head; | 216 | u32 arb_queue_head; |
217 | struct kref refcount; | 217 | struct nvgpu_ref refcount; |
218 | }; | 218 | }; |
219 | 219 | ||
220 | struct nvgpu_clk_session { | 220 | struct nvgpu_clk_session { |
221 | bool zombie; | 221 | bool zombie; |
222 | struct gk20a *g; | 222 | struct gk20a *g; |
223 | struct kref refcount; | 223 | struct nvgpu_ref refcount; |
224 | struct list_head link; | 224 | struct list_head link; |
225 | struct llist_head targets; | 225 | struct llist_head targets; |
226 | 226 | ||
@@ -541,9 +541,9 @@ static int nvgpu_clk_arb_install_fd(struct gk20a *g, | |||
541 | nvgpu_atomic_set(&dev->poll_mask, 0); | 541 | nvgpu_atomic_set(&dev->poll_mask, 0); |
542 | 542 | ||
543 | dev->session = session; | 543 | dev->session = session; |
544 | kref_init(&dev->refcount); | 544 | nvgpu_ref_init(&dev->refcount); |
545 | 545 | ||
546 | kref_get(&session->refcount); | 546 | nvgpu_ref_get(&session->refcount); |
547 | 547 | ||
548 | *_dev = dev; | 548 | *_dev = dev; |
549 | 549 | ||
@@ -573,7 +573,7 @@ int nvgpu_clk_arb_init_session(struct gk20a *g, | |||
573 | return -ENOMEM; | 573 | return -ENOMEM; |
574 | session->g = g; | 574 | session->g = g; |
575 | 575 | ||
576 | kref_init(&session->refcount); | 576 | nvgpu_ref_init(&session->refcount); |
577 | 577 | ||
578 | session->zombie = false; | 578 | session->zombie = false; |
579 | session->target_pool[0].pstate = CTRL_PERF_PSTATE_P8; | 579 | session->target_pool[0].pstate = CTRL_PERF_PSTATE_P8; |
@@ -593,7 +593,7 @@ int nvgpu_clk_arb_init_session(struct gk20a *g, | |||
593 | return 0; | 593 | return 0; |
594 | } | 594 | } |
595 | 595 | ||
596 | static void nvgpu_clk_arb_free_fd(struct kref *refcount) | 596 | static void nvgpu_clk_arb_free_fd(struct nvgpu_ref *refcount) |
597 | { | 597 | { |
598 | struct nvgpu_clk_dev *dev = container_of(refcount, | 598 | struct nvgpu_clk_dev *dev = container_of(refcount, |
599 | struct nvgpu_clk_dev, refcount); | 599 | struct nvgpu_clk_dev, refcount); |
@@ -602,7 +602,7 @@ static void nvgpu_clk_arb_free_fd(struct kref *refcount) | |||
602 | nvgpu_kfree(session->g, dev); | 602 | nvgpu_kfree(session->g, dev); |
603 | } | 603 | } |
604 | 604 | ||
605 | static void nvgpu_clk_arb_free_session(struct kref *refcount) | 605 | static void nvgpu_clk_arb_free_session(struct nvgpu_ref *refcount) |
606 | { | 606 | { |
607 | struct nvgpu_clk_session *session = container_of(refcount, | 607 | struct nvgpu_clk_session *session = container_of(refcount, |
608 | struct nvgpu_clk_session, refcount); | 608 | struct nvgpu_clk_session, refcount); |
@@ -621,7 +621,7 @@ static void nvgpu_clk_arb_free_session(struct kref *refcount) | |||
621 | 621 | ||
622 | head = llist_del_all(&session->targets); | 622 | head = llist_del_all(&session->targets); |
623 | llist_for_each_entry_safe(dev, tmp, head, node) { | 623 | llist_for_each_entry_safe(dev, tmp, head, node) { |
624 | kref_put(&dev->refcount, nvgpu_clk_arb_free_fd); | 624 | nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd); |
625 | } | 625 | } |
626 | synchronize_rcu(); | 626 | synchronize_rcu(); |
627 | nvgpu_kfree(g, session); | 627 | nvgpu_kfree(g, session); |
@@ -635,7 +635,7 @@ void nvgpu_clk_arb_release_session(struct gk20a *g, | |||
635 | gk20a_dbg_fn(""); | 635 | gk20a_dbg_fn(""); |
636 | 636 | ||
637 | session->zombie = true; | 637 | session->zombie = true; |
638 | kref_put(&session->refcount, nvgpu_clk_arb_free_session); | 638 | nvgpu_ref_put(&session->refcount, nvgpu_clk_arb_free_session); |
639 | if (arb && arb->update_work_queue) | 639 | if (arb && arb->update_work_queue) |
640 | queue_work(arb->update_work_queue, &arb->update_fn_work); | 640 | queue_work(arb->update_work_queue, &arb->update_fn_work); |
641 | } | 641 | } |
@@ -1099,7 +1099,7 @@ static void nvgpu_clk_arb_run_arbiter_cb(struct work_struct *work) | |||
1099 | dev->gpc2clk_target_mhz; | 1099 | dev->gpc2clk_target_mhz; |
1100 | gpc2clk_set = true; | 1100 | gpc2clk_set = true; |
1101 | } | 1101 | } |
1102 | kref_get(&dev->refcount); | 1102 | nvgpu_ref_get(&dev->refcount); |
1103 | llist_add(&dev->node, &arb->requests); | 1103 | llist_add(&dev->node, &arb->requests); |
1104 | } | 1104 | } |
1105 | /* Ensure target is updated before ptr swap */ | 1105 | /* Ensure target is updated before ptr swap */ |
@@ -1305,7 +1305,7 @@ exit_arb: | |||
1305 | llist_for_each_entry_safe(dev, tmp, head, node) { | 1305 | llist_for_each_entry_safe(dev, tmp, head, node) { |
1306 | nvgpu_atomic_set(&dev->poll_mask, POLLIN | POLLRDNORM); | 1306 | nvgpu_atomic_set(&dev->poll_mask, POLLIN | POLLRDNORM); |
1307 | wake_up_interruptible(&dev->readout_wq); | 1307 | wake_up_interruptible(&dev->readout_wq); |
1308 | kref_put(&dev->refcount, nvgpu_clk_arb_free_fd); | 1308 | nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd); |
1309 | } | 1309 | } |
1310 | 1310 | ||
1311 | nvgpu_atomic_set(&arb->notification_queue.head, | 1311 | nvgpu_atomic_set(&arb->notification_queue.head, |
@@ -1523,7 +1523,7 @@ int nvgpu_clk_arb_commit_request_fd(struct gk20a *g, | |||
1523 | err = -EINVAL; | 1523 | err = -EINVAL; |
1524 | goto fdput_fd; | 1524 | goto fdput_fd; |
1525 | } | 1525 | } |
1526 | kref_get(&dev->refcount); | 1526 | nvgpu_ref_get(&dev->refcount); |
1527 | llist_add(&dev->node, &session->targets); | 1527 | llist_add(&dev->node, &session->targets); |
1528 | if (arb->update_work_queue) | 1528 | if (arb->update_work_queue) |
1529 | queue_work(arb->update_work_queue, &arb->update_fn_work); | 1529 | queue_work(arb->update_work_queue, &arb->update_fn_work); |
@@ -1607,8 +1607,8 @@ static int nvgpu_clk_arb_release_completion_dev(struct inode *inode, | |||
1607 | 1607 | ||
1608 | gk20a_dbg_fn(""); | 1608 | gk20a_dbg_fn(""); |
1609 | 1609 | ||
1610 | kref_put(&session->refcount, nvgpu_clk_arb_free_session); | 1610 | nvgpu_ref_put(&session->refcount, nvgpu_clk_arb_free_session); |
1611 | kref_put(&dev->refcount, nvgpu_clk_arb_free_fd); | 1611 | nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd); |
1612 | return 0; | 1612 | return 0; |
1613 | } | 1613 | } |
1614 | 1614 | ||
@@ -1631,8 +1631,8 @@ static int nvgpu_clk_arb_release_event_dev(struct inode *inode, | |||
1631 | } | 1631 | } |
1632 | 1632 | ||
1633 | synchronize_rcu(); | 1633 | synchronize_rcu(); |
1634 | kref_put(&session->refcount, nvgpu_clk_arb_free_session); | 1634 | nvgpu_ref_put(&session->refcount, nvgpu_clk_arb_free_session); |
1635 | kref_put(&dev->refcount, nvgpu_clk_arb_free_fd); | 1635 | nvgpu_ref_put(&dev->refcount, nvgpu_clk_arb_free_fd); |
1636 | 1636 | ||
1637 | return 0; | 1637 | return 0; |
1638 | } | 1638 | } |
diff --git a/drivers/gpu/nvgpu/common/linux/debug_fifo.c b/drivers/gpu/nvgpu/common/linux/debug_fifo.c index 1763eb7e..59198718 100644 --- a/drivers/gpu/nvgpu/common/linux/debug_fifo.c +++ b/drivers/gpu/nvgpu/common/linux/debug_fifo.c | |||
@@ -20,7 +20,7 @@ | |||
20 | 20 | ||
21 | #include <nvgpu/sort.h> | 21 | #include <nvgpu/sort.h> |
22 | 22 | ||
23 | void __gk20a_fifo_profile_free(struct kref *ref); | 23 | void __gk20a_fifo_profile_free(struct nvgpu_ref *ref); |
24 | 24 | ||
25 | static void *gk20a_fifo_sched_debugfs_seq_start( | 25 | static void *gk20a_fifo_sched_debugfs_seq_start( |
26 | struct seq_file *s, loff_t *pos) | 26 | struct seq_file *s, loff_t *pos) |
@@ -145,14 +145,15 @@ static int gk20a_fifo_profile_enable(void *data, u64 val) | |||
145 | if (val == 0) { | 145 | if (val == 0) { |
146 | if (f->profile.enabled) { | 146 | if (f->profile.enabled) { |
147 | f->profile.enabled = false; | 147 | f->profile.enabled = false; |
148 | kref_put(&f->profile.ref, __gk20a_fifo_profile_free); | 148 | nvgpu_ref_put(&f->profile.ref, |
149 | __gk20a_fifo_profile_free); | ||
149 | } | 150 | } |
150 | } else { | 151 | } else { |
151 | if (!f->profile.enabled) { | 152 | if (!f->profile.enabled) { |
152 | /* not kref init as it can have a race condition if | 153 | /* not kref init as it can have a race condition if |
153 | * we enable/disable/enable while kickoff is happening | 154 | * we enable/disable/enable while kickoff is happening |
154 | */ | 155 | */ |
155 | if (!kref_get_unless_zero(&f->profile.ref)) { | 156 | if (!nvgpu_ref_get_unless_zero(&f->profile.ref)) { |
156 | f->profile.data = vzalloc( | 157 | f->profile.data = vzalloc( |
157 | FIFO_PROFILING_ENTRIES * | 158 | FIFO_PROFILING_ENTRIES * |
158 | sizeof(struct fifo_profile_gk20a)); | 159 | sizeof(struct fifo_profile_gk20a)); |
@@ -165,7 +166,7 @@ static int gk20a_fifo_profile_enable(void *data, u64 val) | |||
165 | nvgpu_mutex_release(&f->profile.lock); | 166 | nvgpu_mutex_release(&f->profile.lock); |
166 | return -ENOMEM; | 167 | return -ENOMEM; |
167 | } | 168 | } |
168 | kref_init(&f->profile.ref); | 169 | nvgpu_ref_init(&f->profile.ref); |
169 | } | 170 | } |
170 | atomic_set(&f->profile.get.atomic_var, 0); | 171 | atomic_set(&f->profile.get.atomic_var, 0); |
171 | f->profile.enabled = true; | 172 | f->profile.enabled = true; |
@@ -241,7 +242,7 @@ static int gk20a_fifo_profile_stats(struct seq_file *s, void *unused) | |||
241 | u64 percentiles_append[PERCENTILE_RANGES]; | 242 | u64 percentiles_append[PERCENTILE_RANGES]; |
242 | u64 percentiles_userd[PERCENTILE_RANGES]; | 243 | u64 percentiles_userd[PERCENTILE_RANGES]; |
243 | 244 | ||
244 | if (!kref_get_unless_zero(&g->fifo.profile.ref)) { | 245 | if (!nvgpu_ref_get_unless_zero(&g->fifo.profile.ref)) { |
245 | seq_printf(s, "Profiling disabled\n"); | 246 | seq_printf(s, "Profiling disabled\n"); |
246 | return 0; | 247 | return 0; |
247 | } | 248 | } |
@@ -271,7 +272,7 @@ static int gk20a_fifo_profile_stats(struct seq_file *s, void *unused) | |||
271 | percentiles_jobtracking[index], | 272 | percentiles_jobtracking[index], |
272 | percentiles_userd[index]); | 273 | percentiles_userd[index]); |
273 | 274 | ||
274 | kref_put(&g->fifo.profile.ref, __gk20a_fifo_profile_free); | 275 | nvgpu_ref_put(&g->fifo.profile.ref, __gk20a_fifo_profile_free); |
275 | 276 | ||
276 | return 0; | 277 | return 0; |
277 | } | 278 | } |
@@ -312,7 +313,7 @@ void gk20a_fifo_debugfs_init(struct gk20a *g) | |||
312 | nvgpu_mutex_init(&g->fifo.profile.lock); | 313 | nvgpu_mutex_init(&g->fifo.profile.lock); |
313 | g->fifo.profile.enabled = false; | 314 | g->fifo.profile.enabled = false; |
314 | atomic_set(&g->fifo.profile.get.atomic_var, 0); | 315 | atomic_set(&g->fifo.profile.get.atomic_var, 0); |
315 | atomic_set(&g->fifo.profile.ref.refcount, 0); | 316 | atomic_set(&g->fifo.profile.ref.refcount.atomic_var, 0); |
316 | 317 | ||
317 | debugfs_create_file("enable", 0600, profile_root, g, | 318 | debugfs_create_file("enable", 0600, profile_root, g, |
318 | &gk20a_fifo_profile_enable_debugfs_fops); | 319 | &gk20a_fifo_profile_enable_debugfs_fops); |
@@ -322,7 +323,7 @@ void gk20a_fifo_debugfs_init(struct gk20a *g) | |||
322 | 323 | ||
323 | } | 324 | } |
324 | 325 | ||
325 | void __gk20a_fifo_profile_free(struct kref *ref) | 326 | void __gk20a_fifo_profile_free(struct nvgpu_ref *ref) |
326 | { | 327 | { |
327 | struct fifo_gk20a *f = container_of(ref, struct fifo_gk20a, | 328 | struct fifo_gk20a *f = container_of(ref, struct fifo_gk20a, |
328 | profile.ref); | 329 | profile.ref); |
@@ -340,7 +341,7 @@ struct fifo_profile_gk20a *gk20a_fifo_profile_acquire(struct gk20a *g) | |||
340 | unsigned int index; | 341 | unsigned int index; |
341 | 342 | ||
342 | /* If kref is zero, profiling is not enabled */ | 343 | /* If kref is zero, profiling is not enabled */ |
343 | if (!kref_get_unless_zero(&f->profile.ref)) | 344 | if (!nvgpu_ref_get_unless_zero(&f->profile.ref)) |
344 | return NULL; | 345 | return NULL; |
345 | index = atomic_inc_return(&f->profile.get.atomic_var); | 346 | index = atomic_inc_return(&f->profile.get.atomic_var); |
346 | profile = &f->profile.data[index % FIFO_PROFILING_ENTRIES]; | 347 | profile = &f->profile.data[index % FIFO_PROFILING_ENTRIES]; |
@@ -352,7 +353,7 @@ struct fifo_profile_gk20a *gk20a_fifo_profile_acquire(struct gk20a *g) | |||
352 | void gk20a_fifo_profile_release(struct gk20a *g, | 353 | void gk20a_fifo_profile_release(struct gk20a *g, |
353 | struct fifo_profile_gk20a *profile) | 354 | struct fifo_profile_gk20a *profile) |
354 | { | 355 | { |
355 | kref_put(&g->fifo.profile.ref, __gk20a_fifo_profile_free); | 356 | nvgpu_ref_put(&g->fifo.profile.ref, __gk20a_fifo_profile_free); |
356 | } | 357 | } |
357 | 358 | ||
358 | void gk20a_fifo_debugfs_deinit(struct gk20a *g) | 359 | void gk20a_fifo_debugfs_deinit(struct gk20a *g) |
@@ -362,7 +363,7 @@ void gk20a_fifo_debugfs_deinit(struct gk20a *g) | |||
362 | nvgpu_mutex_acquire(&f->profile.lock); | 363 | nvgpu_mutex_acquire(&f->profile.lock); |
363 | if (f->profile.enabled) { | 364 | if (f->profile.enabled) { |
364 | f->profile.enabled = false; | 365 | f->profile.enabled = false; |
365 | kref_put(&f->profile.ref, __gk20a_fifo_profile_free); | 366 | nvgpu_ref_put(&f->profile.ref, __gk20a_fifo_profile_free); |
366 | } | 367 | } |
367 | nvgpu_mutex_release(&f->profile.lock); | 368 | nvgpu_mutex_release(&f->profile.lock); |
368 | } | 369 | } |
diff --git a/drivers/gpu/nvgpu/common/linux/driver_common.c b/drivers/gpu/nvgpu/common/linux/driver_common.c index d1905c86..e8530c05 100644 --- a/drivers/gpu/nvgpu/common/linux/driver_common.c +++ b/drivers/gpu/nvgpu/common/linux/driver_common.c | |||
@@ -217,7 +217,7 @@ int nvgpu_probe(struct gk20a *g, | |||
217 | 217 | ||
218 | g->remove_support = gk20a_remove_support; | 218 | g->remove_support = gk20a_remove_support; |
219 | 219 | ||
220 | kref_init(&g->refcount); | 220 | nvgpu_ref_init(&g->refcount); |
221 | 221 | ||
222 | return 0; | 222 | return 0; |
223 | } | 223 | } |
diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c index cb876e23..c68c907e 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_tsg.c | |||
@@ -260,7 +260,7 @@ int nvgpu_ioctl_tsg_dev_release(struct inode *inode, struct file *filp) | |||
260 | struct tsg_private *priv = filp->private_data; | 260 | struct tsg_private *priv = filp->private_data; |
261 | struct tsg_gk20a *tsg = priv->tsg; | 261 | struct tsg_gk20a *tsg = priv->tsg; |
262 | 262 | ||
263 | kref_put(&tsg->refcount, gk20a_tsg_release); | 263 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); |
264 | nvgpu_kfree(tsg->g, priv); | 264 | nvgpu_kfree(tsg->g, priv); |
265 | return 0; | 265 | return 0; |
266 | } | 266 | } |
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c index 3d1219b6..c84f531d 100644 --- a/drivers/gpu/nvgpu/common/linux/vm.c +++ b/drivers/gpu/nvgpu/common/linux/vm.c | |||
@@ -155,7 +155,7 @@ static u64 __nvgpu_vm_find_mapping(struct vm_gk20a *vm, | |||
155 | else | 155 | else |
156 | mapped_buffer->own_mem_ref = true; | 156 | mapped_buffer->own_mem_ref = true; |
157 | } | 157 | } |
158 | kref_get(&mapped_buffer->ref); | 158 | nvgpu_ref_get(&mapped_buffer->ref); |
159 | 159 | ||
160 | nvgpu_log(g, gpu_dbg_map, | 160 | nvgpu_log(g, gpu_dbg_map, |
161 | "gv: 0x%04x_%08x + 0x%-7zu " | 161 | "gv: 0x%04x_%08x + 0x%-7zu " |
@@ -380,7 +380,7 @@ u64 nvgpu_vm_map(struct vm_gk20a *vm, | |||
380 | mapped_buffer->user_mapped = user_mapped ? 1 : 0; | 380 | mapped_buffer->user_mapped = user_mapped ? 1 : 0; |
381 | mapped_buffer->own_mem_ref = user_mapped; | 381 | mapped_buffer->own_mem_ref = user_mapped; |
382 | nvgpu_init_list_node(&mapped_buffer->buffer_list); | 382 | nvgpu_init_list_node(&mapped_buffer->buffer_list); |
383 | kref_init(&mapped_buffer->ref); | 383 | nvgpu_ref_init(&mapped_buffer->ref); |
384 | 384 | ||
385 | err = nvgpu_insert_mapped_buf(vm, mapped_buffer); | 385 | err = nvgpu_insert_mapped_buf(vm, mapped_buffer); |
386 | if (err) { | 386 | if (err) { |
@@ -425,6 +425,6 @@ void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset) | |||
425 | return; | 425 | return; |
426 | } | 426 | } |
427 | 427 | ||
428 | kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref); | 428 | nvgpu_ref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_ref); |
429 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 429 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
430 | } | 430 | } |
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c index 3ed3c7fe..2e2f52df 100644 --- a/drivers/gpu/nvgpu/common/mm/vm.c +++ b/drivers/gpu/nvgpu/common/mm/vm.c | |||
@@ -404,7 +404,7 @@ static int __nvgpu_vm_init(struct mm_gk20a *mm, | |||
404 | vm->mapped_buffers = NULL; | 404 | vm->mapped_buffers = NULL; |
405 | 405 | ||
406 | nvgpu_mutex_init(&vm->update_gmmu_lock); | 406 | nvgpu_mutex_init(&vm->update_gmmu_lock); |
407 | kref_init(&vm->ref); | 407 | nvgpu_ref_init(&vm->ref); |
408 | nvgpu_init_list_node(&vm->vm_area_list); | 408 | nvgpu_init_list_node(&vm->vm_area_list); |
409 | 409 | ||
410 | /* | 410 | /* |
@@ -557,7 +557,7 @@ static void __nvgpu_vm_remove(struct vm_gk20a *vm) | |||
557 | nvgpu_kfree(g, vm); | 557 | nvgpu_kfree(g, vm); |
558 | } | 558 | } |
559 | 559 | ||
560 | static void __nvgpu_vm_remove_kref(struct kref *ref) | 560 | static void __nvgpu_vm_remove_ref(struct nvgpu_ref *ref) |
561 | { | 561 | { |
562 | struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref); | 562 | struct vm_gk20a *vm = container_of(ref, struct vm_gk20a, ref); |
563 | 563 | ||
@@ -566,12 +566,12 @@ static void __nvgpu_vm_remove_kref(struct kref *ref) | |||
566 | 566 | ||
567 | void nvgpu_vm_get(struct vm_gk20a *vm) | 567 | void nvgpu_vm_get(struct vm_gk20a *vm) |
568 | { | 568 | { |
569 | kref_get(&vm->ref); | 569 | nvgpu_ref_get(&vm->ref); |
570 | } | 570 | } |
571 | 571 | ||
572 | void nvgpu_vm_put(struct vm_gk20a *vm) | 572 | void nvgpu_vm_put(struct vm_gk20a *vm) |
573 | { | 573 | { |
574 | kref_put(&vm->ref, __nvgpu_vm_remove_kref); | 574 | nvgpu_ref_put(&vm->ref, __nvgpu_vm_remove_ref); |
575 | } | 575 | } |
576 | 576 | ||
577 | int nvgpu_insert_mapped_buf(struct vm_gk20a *vm, | 577 | int nvgpu_insert_mapped_buf(struct vm_gk20a *vm, |
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c index 7b831947..19864439 100644 --- a/drivers/gpu/nvgpu/common/mm/vm_area.c +++ b/drivers/gpu/nvgpu/common/mm/vm_area.c | |||
@@ -202,7 +202,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr) | |||
202 | &vm_area->buffer_list_head, | 202 | &vm_area->buffer_list_head, |
203 | nvgpu_mapped_buf, buffer_list) { | 203 | nvgpu_mapped_buf, buffer_list) { |
204 | nvgpu_list_del(&buffer->buffer_list); | 204 | nvgpu_list_del(&buffer->buffer_list); |
205 | kref_put(&buffer->ref, gk20a_vm_unmap_locked_kref); | 205 | nvgpu_ref_put(&buffer->ref, gk20a_vm_unmap_locked_ref); |
206 | } | 206 | } |
207 | 207 | ||
208 | /* if this was a sparse mapping, free the va */ | 208 | /* if this was a sparse mapping, free the va */ |
diff --git a/drivers/gpu/nvgpu/common/semaphore.c b/drivers/gpu/nvgpu/common/semaphore.c index ac45aaaa..5496f5ec 100644 --- a/drivers/gpu/nvgpu/common/semaphore.c +++ b/drivers/gpu/nvgpu/common/semaphore.c | |||
@@ -156,7 +156,7 @@ struct nvgpu_semaphore_pool *nvgpu_semaphore_pool_alloc( | |||
156 | p->sema_sea = sea; | 156 | p->sema_sea = sea; |
157 | nvgpu_init_list_node(&p->hw_semas); | 157 | nvgpu_init_list_node(&p->hw_semas); |
158 | nvgpu_init_list_node(&p->pool_list_entry); | 158 | nvgpu_init_list_node(&p->pool_list_entry); |
159 | kref_init(&p->ref); | 159 | nvgpu_ref_init(&p->ref); |
160 | 160 | ||
161 | sea->page_count++; | 161 | sea->page_count++; |
162 | nvgpu_list_add(&p->pool_list_entry, &sea->pool_list); | 162 | nvgpu_list_add(&p->pool_list_entry, &sea->pool_list); |
@@ -285,7 +285,7 @@ void nvgpu_semaphore_pool_unmap(struct nvgpu_semaphore_pool *p, | |||
285 | * Completely free a semaphore_pool. You should make sure this pool is not | 285 | * Completely free a semaphore_pool. You should make sure this pool is not |
286 | * mapped otherwise there's going to be a memory leak. | 286 | * mapped otherwise there's going to be a memory leak. |
287 | */ | 287 | */ |
288 | static void nvgpu_semaphore_pool_free(struct kref *ref) | 288 | static void nvgpu_semaphore_pool_free(struct nvgpu_ref *ref) |
289 | { | 289 | { |
290 | struct nvgpu_semaphore_pool *p = | 290 | struct nvgpu_semaphore_pool *p = |
291 | container_of(ref, struct nvgpu_semaphore_pool, ref); | 291 | container_of(ref, struct nvgpu_semaphore_pool, ref); |
@@ -314,12 +314,12 @@ static void nvgpu_semaphore_pool_free(struct kref *ref) | |||
314 | 314 | ||
315 | void nvgpu_semaphore_pool_get(struct nvgpu_semaphore_pool *p) | 315 | void nvgpu_semaphore_pool_get(struct nvgpu_semaphore_pool *p) |
316 | { | 316 | { |
317 | kref_get(&p->ref); | 317 | nvgpu_ref_get(&p->ref); |
318 | } | 318 | } |
319 | 319 | ||
320 | void nvgpu_semaphore_pool_put(struct nvgpu_semaphore_pool *p) | 320 | void nvgpu_semaphore_pool_put(struct nvgpu_semaphore_pool *p) |
321 | { | 321 | { |
322 | kref_put(&p->ref, nvgpu_semaphore_pool_free); | 322 | nvgpu_ref_put(&p->ref, nvgpu_semaphore_pool_free); |
323 | } | 323 | } |
324 | 324 | ||
325 | /* | 325 | /* |
@@ -423,7 +423,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch) | |||
423 | if (!s) | 423 | if (!s) |
424 | return NULL; | 424 | return NULL; |
425 | 425 | ||
426 | kref_init(&s->ref); | 426 | nvgpu_ref_init(&s->ref); |
427 | s->hw_sema = ch->hw_sema; | 427 | s->hw_sema = ch->hw_sema; |
428 | nvgpu_atomic_set(&s->value, 0); | 428 | nvgpu_atomic_set(&s->value, 0); |
429 | 429 | ||
@@ -438,7 +438,7 @@ struct nvgpu_semaphore *nvgpu_semaphore_alloc(struct channel_gk20a *ch) | |||
438 | return s; | 438 | return s; |
439 | } | 439 | } |
440 | 440 | ||
441 | static void nvgpu_semaphore_free(struct kref *ref) | 441 | static void nvgpu_semaphore_free(struct nvgpu_ref *ref) |
442 | { | 442 | { |
443 | struct nvgpu_semaphore *s = | 443 | struct nvgpu_semaphore *s = |
444 | container_of(ref, struct nvgpu_semaphore, ref); | 444 | container_of(ref, struct nvgpu_semaphore, ref); |
@@ -450,10 +450,10 @@ static void nvgpu_semaphore_free(struct kref *ref) | |||
450 | 450 | ||
451 | void nvgpu_semaphore_put(struct nvgpu_semaphore *s) | 451 | void nvgpu_semaphore_put(struct nvgpu_semaphore *s) |
452 | { | 452 | { |
453 | kref_put(&s->ref, nvgpu_semaphore_free); | 453 | nvgpu_ref_put(&s->ref, nvgpu_semaphore_free); |
454 | } | 454 | } |
455 | 455 | ||
456 | void nvgpu_semaphore_get(struct nvgpu_semaphore *s) | 456 | void nvgpu_semaphore_get(struct nvgpu_semaphore *s) |
457 | { | 457 | { |
458 | kref_get(&s->ref); | 458 | nvgpu_ref_get(&s->ref); |
459 | } | 459 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c index a7250b17..fdfef3da 100644 --- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.c | |||
@@ -33,10 +33,10 @@ | |||
33 | struct gk20a_fence_ops { | 33 | struct gk20a_fence_ops { |
34 | int (*wait)(struct gk20a_fence *, long timeout); | 34 | int (*wait)(struct gk20a_fence *, long timeout); |
35 | bool (*is_expired)(struct gk20a_fence *); | 35 | bool (*is_expired)(struct gk20a_fence *); |
36 | void *(*free)(struct kref *); | 36 | void *(*free)(struct nvgpu_ref *); |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static void gk20a_fence_free(struct kref *ref) | 39 | static void gk20a_fence_free(struct nvgpu_ref *ref) |
40 | { | 40 | { |
41 | struct gk20a_fence *f = | 41 | struct gk20a_fence *f = |
42 | container_of(ref, struct gk20a_fence, ref); | 42 | container_of(ref, struct gk20a_fence, ref); |
@@ -59,13 +59,13 @@ static void gk20a_fence_free(struct kref *ref) | |||
59 | void gk20a_fence_put(struct gk20a_fence *f) | 59 | void gk20a_fence_put(struct gk20a_fence *f) |
60 | { | 60 | { |
61 | if (f) | 61 | if (f) |
62 | kref_put(&f->ref, gk20a_fence_free); | 62 | nvgpu_ref_put(&f->ref, gk20a_fence_free); |
63 | } | 63 | } |
64 | 64 | ||
65 | struct gk20a_fence *gk20a_fence_get(struct gk20a_fence *f) | 65 | struct gk20a_fence *gk20a_fence_get(struct gk20a_fence *f) |
66 | { | 66 | { |
67 | if (f) | 67 | if (f) |
68 | kref_get(&f->ref); | 68 | nvgpu_ref_get(&f->ref); |
69 | return f; | 69 | return f; |
70 | } | 70 | } |
71 | 71 | ||
@@ -175,7 +175,7 @@ struct gk20a_fence *gk20a_alloc_fence(struct channel_gk20a *c) | |||
175 | fence = nvgpu_kzalloc(c->g, sizeof(struct gk20a_fence)); | 175 | fence = nvgpu_kzalloc(c->g, sizeof(struct gk20a_fence)); |
176 | 176 | ||
177 | if (fence) { | 177 | if (fence) { |
178 | kref_init(&fence->ref); | 178 | nvgpu_ref_init(&fence->ref); |
179 | fence->g = c->g; | 179 | fence->g = c->g; |
180 | } | 180 | } |
181 | 181 | ||
diff --git a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h index 140f5488..e0eb09b6 100644 --- a/drivers/gpu/nvgpu/gk20a/fence_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/fence_gk20a.h | |||
@@ -33,7 +33,7 @@ struct gk20a_fence { | |||
33 | 33 | ||
34 | /* Valid for all fence types: */ | 34 | /* Valid for all fence types: */ |
35 | bool valid; | 35 | bool valid; |
36 | struct kref ref; | 36 | struct nvgpu_ref ref; |
37 | bool wfi; | 37 | bool wfi; |
38 | struct sync_fence *sync_fence; | 38 | struct sync_fence *sync_fence; |
39 | const struct gk20a_fence_ops *ops; | 39 | const struct gk20a_fence_ops *ops; |
diff --git a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h index a6eae8ca..fb4932c8 100644 --- a/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/fifo_gk20a.h | |||
@@ -159,7 +159,7 @@ struct fifo_gk20a { | |||
159 | nvgpu_atomic_t get; | 159 | nvgpu_atomic_t get; |
160 | bool enabled; | 160 | bool enabled; |
161 | u64 *sorted; | 161 | u64 *sorted; |
162 | struct kref ref; | 162 | struct nvgpu_ref ref; |
163 | struct nvgpu_mutex lock; | 163 | struct nvgpu_mutex lock; |
164 | } profile; | 164 | } profile; |
165 | #endif | 165 | #endif |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.c b/drivers/gpu/nvgpu/gk20a/gk20a.c index 550b22c0..639ec4b5 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/gk20a.c | |||
@@ -511,7 +511,7 @@ int gk20a_init_gpu_characteristics(struct gk20a *g) | |||
511 | /* | 511 | /* |
512 | * Free the gk20a struct. | 512 | * Free the gk20a struct. |
513 | */ | 513 | */ |
514 | static void gk20a_free_cb(struct kref *refcount) | 514 | static void gk20a_free_cb(struct nvgpu_ref *refcount) |
515 | { | 515 | { |
516 | struct gk20a *g = container_of(refcount, | 516 | struct gk20a *g = container_of(refcount, |
517 | struct gk20a, refcount); | 517 | struct gk20a, refcount); |
@@ -544,10 +544,11 @@ struct gk20a * __must_check gk20a_get(struct gk20a *g) | |||
544 | * the code will never be in such a situation that this race is | 544 | * the code will never be in such a situation that this race is |
545 | * possible. | 545 | * possible. |
546 | */ | 546 | */ |
547 | success = kref_get_unless_zero(&g->refcount); | 547 | success = nvgpu_ref_get_unless_zero(&g->refcount); |
548 | 548 | ||
549 | gk20a_dbg(gpu_dbg_shutdown, "GET: refs currently %d %s", | 549 | gk20a_dbg(gpu_dbg_shutdown, "GET: refs currently %d %s", |
550 | atomic_read(&g->refcount.refcount), success ? "" : "(FAILED)"); | 550 | nvgpu_atomic_read(&g->refcount.refcount), |
551 | success ? "" : "(FAILED)"); | ||
551 | 552 | ||
552 | return success ? g : NULL; | 553 | return success ? g : NULL; |
553 | } | 554 | } |
@@ -571,7 +572,7 @@ void gk20a_put(struct gk20a *g) | |||
571 | * ... Freeing GK20A struct! | 572 | * ... Freeing GK20A struct! |
572 | */ | 573 | */ |
573 | gk20a_dbg(gpu_dbg_shutdown, "PUT: refs currently %d", | 574 | gk20a_dbg(gpu_dbg_shutdown, "PUT: refs currently %d", |
574 | atomic_read(&g->refcount.refcount)); | 575 | nvgpu_atomic_read(&g->refcount.refcount)); |
575 | 576 | ||
576 | kref_put(&g->refcount, gk20a_free_cb); | 577 | nvgpu_ref_put(&g->refcount, gk20a_free_cb); |
577 | } | 578 | } |
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a.h b/drivers/gpu/nvgpu/gk20a/gk20a.h index 5efa846d..15e81291 100644 --- a/drivers/gpu/nvgpu/gk20a/gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/gk20a.h | |||
@@ -1036,7 +1036,7 @@ struct gk20a { | |||
1036 | 1036 | ||
1037 | nvgpu_atomic_t usage_count; | 1037 | nvgpu_atomic_t usage_count; |
1038 | 1038 | ||
1039 | struct kref refcount; | 1039 | struct nvgpu_ref refcount; |
1040 | 1040 | ||
1041 | struct resource *reg_mem; | 1041 | struct resource *reg_mem; |
1042 | void __iomem *regs; | 1042 | void __iomem *regs; |
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c index 2ce78cef..3030c170 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c | |||
@@ -811,7 +811,7 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm, | |||
811 | mapped_buffer = mapped_buffer_from_rbtree_node(node); | 811 | mapped_buffer = mapped_buffer_from_rbtree_node(node); |
812 | if (mapped_buffer->user_mapped) { | 812 | if (mapped_buffer->user_mapped) { |
813 | buffer_list[i] = mapped_buffer; | 813 | buffer_list[i] = mapped_buffer; |
814 | kref_get(&mapped_buffer->ref); | 814 | nvgpu_ref_get(&mapped_buffer->ref); |
815 | i++; | 815 | i++; |
816 | } | 816 | } |
817 | nvgpu_rbtree_enum_next(&node, node); | 817 | nvgpu_rbtree_enum_next(&node, node); |
@@ -827,7 +827,7 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm, | |||
827 | return 0; | 827 | return 0; |
828 | } | 828 | } |
829 | 829 | ||
830 | void gk20a_vm_unmap_locked_kref(struct kref *ref) | 830 | void gk20a_vm_unmap_locked_ref(struct nvgpu_ref *ref) |
831 | { | 831 | { |
832 | struct nvgpu_mapped_buf *mapped_buffer = | 832 | struct nvgpu_mapped_buf *mapped_buffer = |
833 | container_of(ref, struct nvgpu_mapped_buf, ref); | 833 | container_of(ref, struct nvgpu_mapped_buf, ref); |
@@ -849,8 +849,8 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm, | |||
849 | vm->kref_put_batch = &batch; | 849 | vm->kref_put_batch = &batch; |
850 | 850 | ||
851 | for (i = 0; i < num_buffers; ++i) | 851 | for (i = 0; i < num_buffers; ++i) |
852 | kref_put(&mapped_buffers[i]->ref, | 852 | nvgpu_ref_put(&mapped_buffers[i]->ref, |
853 | gk20a_vm_unmap_locked_kref); | 853 | gk20a_vm_unmap_locked_ref); |
854 | 854 | ||
855 | vm->kref_put_batch = NULL; | 855 | vm->kref_put_batch = NULL; |
856 | nvgpu_vm_mapping_batch_finish_locked(vm, &batch); | 856 | nvgpu_vm_mapping_batch_finish_locked(vm, &batch); |
@@ -882,8 +882,9 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset, | |||
882 | nvgpu_timeout_init(vm->mm->g, &timeout, 10000, | 882 | nvgpu_timeout_init(vm->mm->g, &timeout, 10000, |
883 | NVGPU_TIMER_RETRY_TIMER); | 883 | NVGPU_TIMER_RETRY_TIMER); |
884 | do { | 884 | do { |
885 | if (atomic_read(&mapped_buffer->ref.refcount) == 1) | 885 | if (nvgpu_atomic_read( |
886 | break; | 886 | &mapped_buffer->ref.refcount) == 1) |
887 | break; | ||
887 | nvgpu_udelay(5); | 888 | nvgpu_udelay(5); |
888 | } while (!nvgpu_timeout_expired_msg(&timeout, | 889 | } while (!nvgpu_timeout_expired_msg(&timeout, |
889 | "sync-unmap failed on 0x%llx")); | 890 | "sync-unmap failed on 0x%llx")); |
@@ -902,7 +903,7 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset, | |||
902 | vm->num_user_mapped_buffers--; | 903 | vm->num_user_mapped_buffers--; |
903 | 904 | ||
904 | vm->kref_put_batch = batch; | 905 | vm->kref_put_batch = batch; |
905 | kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref); | 906 | nvgpu_ref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_ref); |
906 | vm->kref_put_batch = NULL; | 907 | vm->kref_put_batch = NULL; |
907 | 908 | ||
908 | nvgpu_mutex_release(&vm->update_gmmu_lock); | 909 | nvgpu_mutex_release(&vm->update_gmmu_lock); |
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h index e8b90c8f..82a4ee85 100644 --- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.h | |||
@@ -434,6 +434,6 @@ extern const struct gk20a_mmu_level gk20a_mm_levels_128k[]; | |||
434 | 434 | ||
435 | int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd, | 435 | int gk20a_mm_get_buffer_info(struct device *dev, int dmabuf_fd, |
436 | u64 *buffer_id, u64 *buffer_len); | 436 | u64 *buffer_id, u64 *buffer_len); |
437 | void gk20a_vm_unmap_locked_kref(struct kref *ref); | 437 | void gk20a_vm_unmap_locked_ref(struct nvgpu_ref *ref); |
438 | 438 | ||
439 | #endif /* MM_GK20A_H */ | 439 | #endif /* MM_GK20A_H */ |
diff --git a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c index 014848ba..ac54addd 100644 --- a/drivers/gpu/nvgpu/gk20a/sched_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/sched_gk20a.c | |||
@@ -189,7 +189,7 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched, | |||
189 | return -EINVAL; | 189 | return -EINVAL; |
190 | 190 | ||
191 | tsg = &f->tsg[tsgid]; | 191 | tsg = &f->tsg[tsgid]; |
192 | if (!kref_get_unless_zero(&tsg->refcount)) | 192 | if (!nvgpu_ref_get_unless_zero(&tsg->refcount)) |
193 | return -ENXIO; | 193 | return -ENXIO; |
194 | 194 | ||
195 | arg->pid = tsg->tgid; /* kernel tgid corresponds to user pid */ | 195 | arg->pid = tsg->tgid; /* kernel tgid corresponds to user pid */ |
@@ -206,7 +206,7 @@ static int gk20a_sched_dev_ioctl_get_params(struct gk20a_sched_ctrl *sched, | |||
206 | arg->compute_preempt_mode = 0; | 206 | arg->compute_preempt_mode = 0; |
207 | } | 207 | } |
208 | 208 | ||
209 | kref_put(&tsg->refcount, gk20a_tsg_release); | 209 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); |
210 | 210 | ||
211 | return 0; | 211 | return 0; |
212 | } | 212 | } |
@@ -227,7 +227,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice( | |||
227 | return -EINVAL; | 227 | return -EINVAL; |
228 | 228 | ||
229 | tsg = &f->tsg[tsgid]; | 229 | tsg = &f->tsg[tsgid]; |
230 | if (!kref_get_unless_zero(&tsg->refcount)) | 230 | if (!nvgpu_ref_get_unless_zero(&tsg->refcount)) |
231 | return -ENXIO; | 231 | return -ENXIO; |
232 | 232 | ||
233 | err = gk20a_busy(g); | 233 | err = gk20a_busy(g); |
@@ -239,7 +239,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_timeslice( | |||
239 | gk20a_idle(g); | 239 | gk20a_idle(g); |
240 | 240 | ||
241 | done: | 241 | done: |
242 | kref_put(&tsg->refcount, gk20a_tsg_release); | 242 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); |
243 | 243 | ||
244 | return err; | 244 | return err; |
245 | } | 245 | } |
@@ -260,7 +260,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave( | |||
260 | return -EINVAL; | 260 | return -EINVAL; |
261 | 261 | ||
262 | tsg = &f->tsg[tsgid]; | 262 | tsg = &f->tsg[tsgid]; |
263 | if (!kref_get_unless_zero(&tsg->refcount)) | 263 | if (!nvgpu_ref_get_unless_zero(&tsg->refcount)) |
264 | return -ENXIO; | 264 | return -ENXIO; |
265 | 265 | ||
266 | err = gk20a_busy(g); | 266 | err = gk20a_busy(g); |
@@ -272,7 +272,7 @@ static int gk20a_sched_dev_ioctl_tsg_set_runlist_interleave( | |||
272 | gk20a_idle(g); | 272 | gk20a_idle(g); |
273 | 273 | ||
274 | done: | 274 | done: |
275 | kref_put(&tsg->refcount, gk20a_tsg_release); | 275 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); |
276 | 276 | ||
277 | return err; | 277 | return err; |
278 | } | 278 | } |
@@ -320,7 +320,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched, | |||
320 | return -EINVAL; | 320 | return -EINVAL; |
321 | 321 | ||
322 | tsg = &f->tsg[tsgid]; | 322 | tsg = &f->tsg[tsgid]; |
323 | if (!kref_get_unless_zero(&tsg->refcount)) | 323 | if (!nvgpu_ref_get_unless_zero(&tsg->refcount)) |
324 | return -ENXIO; | 324 | return -ENXIO; |
325 | 325 | ||
326 | nvgpu_mutex_acquire(&sched->status_lock); | 326 | nvgpu_mutex_acquire(&sched->status_lock); |
@@ -328,7 +328,7 @@ static int gk20a_sched_dev_ioctl_get_tsg(struct gk20a_sched_ctrl *sched, | |||
328 | nvgpu_warn(g, "tsgid=%d already referenced", tsgid); | 328 | nvgpu_warn(g, "tsgid=%d already referenced", tsgid); |
329 | /* unlock status_lock as gk20a_tsg_release locks it */ | 329 | /* unlock status_lock as gk20a_tsg_release locks it */ |
330 | nvgpu_mutex_release(&sched->status_lock); | 330 | nvgpu_mutex_release(&sched->status_lock); |
331 | kref_put(&tsg->refcount, gk20a_tsg_release); | 331 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); |
332 | return -ENXIO; | 332 | return -ENXIO; |
333 | } | 333 | } |
334 | 334 | ||
@@ -364,7 +364,7 @@ static int gk20a_sched_dev_ioctl_put_tsg(struct gk20a_sched_ctrl *sched, | |||
364 | nvgpu_mutex_release(&sched->status_lock); | 364 | nvgpu_mutex_release(&sched->status_lock); |
365 | 365 | ||
366 | tsg = &f->tsg[tsgid]; | 366 | tsg = &f->tsg[tsgid]; |
367 | kref_put(&tsg->refcount, gk20a_tsg_release); | 367 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); |
368 | 368 | ||
369 | return 0; | 369 | return 0; |
370 | } | 370 | } |
@@ -507,7 +507,7 @@ int gk20a_sched_dev_release(struct inode *inode, struct file *filp) | |||
507 | for (tsgid = 0; tsgid < f->num_channels; tsgid++) { | 507 | for (tsgid = 0; tsgid < f->num_channels; tsgid++) { |
508 | if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { | 508 | if (NVGPU_SCHED_ISSET(tsgid, sched->ref_tsg_bitmap)) { |
509 | tsg = &f->tsg[tsgid]; | 509 | tsg = &f->tsg[tsgid]; |
510 | kref_put(&tsg->refcount, gk20a_tsg_release); | 510 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); |
511 | } | 511 | } |
512 | } | 512 | } |
513 | 513 | ||
diff --git a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c index deaf19a1..a8e824b6 100644 --- a/drivers/gpu/nvgpu/gk20a/sync_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/sync_gk20a.c | |||
@@ -43,7 +43,7 @@ struct gk20a_sync_timeline { | |||
43 | */ | 43 | */ |
44 | struct gk20a_sync_pt { | 44 | struct gk20a_sync_pt { |
45 | struct gk20a *g; | 45 | struct gk20a *g; |
46 | struct kref refcount; | 46 | struct nvgpu_ref refcount; |
47 | u32 thresh; | 47 | u32 thresh; |
48 | struct nvgpu_semaphore *sema; | 48 | struct nvgpu_semaphore *sema; |
49 | struct gk20a_sync_timeline *obj; | 49 | struct gk20a_sync_timeline *obj; |
@@ -170,7 +170,7 @@ static struct gk20a_sync_timeline *to_gk20a_timeline(struct sync_timeline *obj) | |||
170 | return (struct gk20a_sync_timeline *)obj; | 170 | return (struct gk20a_sync_timeline *)obj; |
171 | } | 171 | } |
172 | 172 | ||
173 | static void gk20a_sync_pt_free_shared(struct kref *ref) | 173 | static void gk20a_sync_pt_free_shared(struct nvgpu_ref *ref) |
174 | { | 174 | { |
175 | struct gk20a_sync_pt *pt = | 175 | struct gk20a_sync_pt *pt = |
176 | container_of(ref, struct gk20a_sync_pt, refcount); | 176 | container_of(ref, struct gk20a_sync_pt, refcount); |
@@ -192,7 +192,7 @@ static struct gk20a_sync_pt *gk20a_sync_pt_create_shared( | |||
192 | if (!shared) | 192 | if (!shared) |
193 | return NULL; | 193 | return NULL; |
194 | 194 | ||
195 | kref_init(&shared->refcount); | 195 | nvgpu_ref_init(&shared->refcount); |
196 | shared->g = g; | 196 | shared->g = g; |
197 | shared->obj = obj; | 197 | shared->obj = obj; |
198 | shared->sema = sema; | 198 | shared->sema = sema; |
@@ -229,7 +229,7 @@ static void gk20a_sync_pt_free_inst(struct sync_pt *sync_pt) | |||
229 | { | 229 | { |
230 | struct gk20a_sync_pt *pt = to_gk20a_sync_pt(sync_pt); | 230 | struct gk20a_sync_pt *pt = to_gk20a_sync_pt(sync_pt); |
231 | if (pt) | 231 | if (pt) |
232 | kref_put(&pt->refcount, gk20a_sync_pt_free_shared); | 232 | nvgpu_ref_put(&pt->refcount, gk20a_sync_pt_free_shared); |
233 | } | 233 | } |
234 | 234 | ||
235 | static struct sync_pt *gk20a_sync_pt_dup_inst(struct sync_pt *sync_pt) | 235 | static struct sync_pt *gk20a_sync_pt_dup_inst(struct sync_pt *sync_pt) |
@@ -242,7 +242,7 @@ static struct sync_pt *gk20a_sync_pt_dup_inst(struct sync_pt *sync_pt) | |||
242 | if (!pti) | 242 | if (!pti) |
243 | return NULL; | 243 | return NULL; |
244 | pti->shared = pt; | 244 | pti->shared = pt; |
245 | kref_get(&pt->refcount); | 245 | nvgpu_ref_get(&pt->refcount); |
246 | return &pti->pt; | 246 | return &pti->pt; |
247 | } | 247 | } |
248 | 248 | ||
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c index 99d72292..f3e87a13 100644 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.c | |||
@@ -104,7 +104,7 @@ int gk20a_tsg_bind_channel(struct tsg_gk20a *tsg, | |||
104 | nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list); | 104 | nvgpu_list_add_tail(&ch->ch_entry, &tsg->ch_list); |
105 | up_write(&tsg->ch_list_lock); | 105 | up_write(&tsg->ch_list_lock); |
106 | 106 | ||
107 | kref_get(&tsg->refcount); | 107 | nvgpu_ref_get(&tsg->refcount); |
108 | 108 | ||
109 | gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n", | 109 | gk20a_dbg(gpu_dbg_fn, "BIND tsg:%d channel:%d\n", |
110 | tsg->tsgid, ch->chid); | 110 | tsg->tsgid, ch->chid); |
@@ -122,7 +122,7 @@ int gk20a_tsg_unbind_channel(struct channel_gk20a *ch) | |||
122 | nvgpu_list_del(&ch->ch_entry); | 122 | nvgpu_list_del(&ch->ch_entry); |
123 | up_write(&tsg->ch_list_lock); | 123 | up_write(&tsg->ch_list_lock); |
124 | 124 | ||
125 | kref_put(&tsg->refcount, gk20a_tsg_release); | 125 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); |
126 | 126 | ||
127 | ch->tsgid = NVGPU_INVALID_TSG_ID; | 127 | ch->tsgid = NVGPU_INVALID_TSG_ID; |
128 | 128 | ||
@@ -257,7 +257,7 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g) | |||
257 | 257 | ||
258 | tsg->g = g; | 258 | tsg->g = g; |
259 | tsg->num_active_channels = 0; | 259 | tsg->num_active_channels = 0; |
260 | kref_init(&tsg->refcount); | 260 | nvgpu_ref_init(&tsg->refcount); |
261 | 261 | ||
262 | tsg->tsg_gr_ctx = NULL; | 262 | tsg->tsg_gr_ctx = NULL; |
263 | tsg->vm = NULL; | 263 | tsg->vm = NULL; |
@@ -287,11 +287,11 @@ struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g) | |||
287 | return tsg; | 287 | return tsg; |
288 | 288 | ||
289 | clean_up: | 289 | clean_up: |
290 | kref_put(&tsg->refcount, gk20a_tsg_release); | 290 | nvgpu_ref_put(&tsg->refcount, gk20a_tsg_release); |
291 | return NULL; | 291 | return NULL; |
292 | } | 292 | } |
293 | 293 | ||
294 | void gk20a_tsg_release(struct kref *ref) | 294 | void gk20a_tsg_release(struct nvgpu_ref *ref) |
295 | { | 295 | { |
296 | struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount); | 296 | struct tsg_gk20a *tsg = container_of(ref, struct tsg_gk20a, refcount); |
297 | struct gk20a *g = tsg->g; | 297 | struct gk20a *g = tsg->g; |
diff --git a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h index 40e12105..9195d3d3 100644 --- a/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h +++ b/drivers/gpu/nvgpu/gk20a/tsg_gk20a.h | |||
@@ -25,7 +25,7 @@ struct channel_gk20a; | |||
25 | 25 | ||
26 | bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch); | 26 | bool gk20a_is_channel_marked_as_tsg(struct channel_gk20a *ch); |
27 | struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g); | 27 | struct tsg_gk20a *gk20a_tsg_open(struct gk20a *g); |
28 | void gk20a_tsg_release(struct kref *ref); | 28 | void gk20a_tsg_release(struct nvgpu_ref *ref); |
29 | 29 | ||
30 | int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid); | 30 | int gk20a_init_tsg_support(struct gk20a *g, u32 tsgid); |
31 | struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch); | 31 | struct tsg_gk20a *tsg_gk20a_from_ch(struct channel_gk20a *ch); |
@@ -36,7 +36,7 @@ struct tsg_gk20a { | |||
36 | bool in_use; | 36 | bool in_use; |
37 | int tsgid; | 37 | int tsgid; |
38 | 38 | ||
39 | struct kref refcount; | 39 | struct nvgpu_ref refcount; |
40 | 40 | ||
41 | struct nvgpu_list_node ch_list; | 41 | struct nvgpu_list_node ch_list; |
42 | int num_active_channels; | 42 | int num_active_channels; |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/atomic.h b/drivers/gpu/nvgpu/include/nvgpu/atomic.h index c7a5fcd9..393a9d35 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/atomic.h +++ b/drivers/gpu/nvgpu/include/nvgpu/atomic.h | |||
@@ -61,10 +61,18 @@ static inline bool nvgpu_atomic_dec_and_test(nvgpu_atomic_t *v) | |||
61 | { | 61 | { |
62 | return __nvgpu_atomic_dec_and_test(v); | 62 | return __nvgpu_atomic_dec_and_test(v); |
63 | } | 63 | } |
64 | static inline bool nvgpu_atomic_sub_and_test(int i, nvgpu_atomic_t *v) | ||
65 | { | ||
66 | return __nvgpu_atomic_sub_and_test(i, v); | ||
67 | } | ||
64 | static inline int nvgpu_atomic_add_return(int i, nvgpu_atomic_t *v) | 68 | static inline int nvgpu_atomic_add_return(int i, nvgpu_atomic_t *v) |
65 | { | 69 | { |
66 | return __nvgpu_atomic_add_return(i, v); | 70 | return __nvgpu_atomic_add_return(i, v); |
67 | } | 71 | } |
72 | static inline int nvgpu_atomic_add_unless(nvgpu_atomic_t *v, int a, int u) | ||
73 | { | ||
74 | return __nvgpu_atomic_add_unless(v, a, u); | ||
75 | } | ||
68 | static inline void nvgpu_atomic64_set(nvgpu_atomic64_t *v, long i) | 76 | static inline void nvgpu_atomic64_set(nvgpu_atomic64_t *v, long i) |
69 | { | 77 | { |
70 | return __nvgpu_atomic64_set(v, i); | 78 | return __nvgpu_atomic64_set(v, i); |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/kref.h b/drivers/gpu/nvgpu/include/nvgpu/kref.h index d24db603..fd2b456f 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/kref.h +++ b/drivers/gpu/nvgpu/include/nvgpu/kref.h | |||
@@ -10,11 +10,68 @@ | |||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
11 | * more details. | 11 | * more details. |
12 | */ | 12 | */ |
13 | |||
14 | /* | ||
15 | * The following structure is used for reference counting of objects in nvgpu. | ||
16 | */ | ||
13 | #ifndef __NVGPU_KREF_H__ | 17 | #ifndef __NVGPU_KREF_H__ |
14 | #define __NVGPU_KREF_H__ | 18 | #define __NVGPU_KREF_H__ |
15 | 19 | ||
16 | #ifdef __KERNEL__ | 20 | #include <nvgpu/atomic.h> |
17 | #include <linux/kref.h> | 21 | |
18 | #endif | 22 | struct nvgpu_ref { |
23 | nvgpu_atomic_t refcount; | ||
24 | }; | ||
25 | |||
26 | /* | ||
27 | * Initialize object. | ||
28 | * @ref: the nvgpu_ref object to initialize | ||
29 | */ | ||
30 | static inline void nvgpu_ref_init(struct nvgpu_ref *ref) | ||
31 | { | ||
32 | nvgpu_atomic_set(&ref->refcount, 1); | ||
33 | } | ||
34 | |||
35 | /* | ||
36 | * Increment reference count for the object | ||
37 | * @ref: the nvgpu_ref object | ||
38 | */ | ||
39 | static inline void nvgpu_ref_get(struct nvgpu_ref *ref) | ||
40 | { | ||
41 | nvgpu_atomic_inc(&ref->refcount); | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * Decrement reference count for the object and call release() if it becomes | ||
46 | * zero. | ||
47 | * @ref: the nvgpu_ref object | ||
48 | * @release: pointer to the function that would be invoked to clean up the | ||
49 | * object when the reference count becomes zero, i.e. the last | ||
50 | * reference corresponding to this object is removed. | ||
51 | * Return 1 if object was removed, otherwise return 0. The user should not | ||
52 | * make any assumptions about the status of the object in the memory when | ||
53 | * the function returns 0 and should only use it to know that there are no | ||
54 | * further references to this object. | ||
55 | */ | ||
56 | static inline int nvgpu_ref_put(struct nvgpu_ref *ref, | ||
57 | void (*release)(struct nvgpu_ref *r)) | ||
58 | { | ||
59 | if (nvgpu_atomic_sub_and_test(1, &ref->refcount)) { | ||
60 | if (release != NULL) | ||
61 | release(ref); | ||
62 | return 1; | ||
63 | } | ||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | /* | ||
68 | * Increment reference count for the object unless it is zero. | ||
69 | * @ref: the nvgpu_ref object | ||
70 | * Return non-zero if the increment succeeds, Otherwise return 0. | ||
71 | */ | ||
72 | static inline int __must_check nvgpu_ref_get_unless_zero(struct nvgpu_ref *ref) | ||
73 | { | ||
74 | return nvgpu_atomic_add_unless(&ref->refcount, 1, 0); | ||
75 | } | ||
19 | 76 | ||
20 | #endif /* __NVGPU_KREF_H__ */ | 77 | #endif /* __NVGPU_KREF_H__ */ |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/atomic.h b/drivers/gpu/nvgpu/include/nvgpu/linux/atomic.h index 1fdb2674..0734672e 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/linux/atomic.h +++ b/drivers/gpu/nvgpu/include/nvgpu/linux/atomic.h | |||
@@ -81,11 +81,21 @@ static inline bool __nvgpu_atomic_dec_and_test(nvgpu_atomic_t *v) | |||
81 | return atomic_dec_and_test(&v->atomic_var); | 81 | return atomic_dec_and_test(&v->atomic_var); |
82 | } | 82 | } |
83 | 83 | ||
84 | static inline bool __nvgpu_atomic_sub_and_test(int i, nvgpu_atomic_t *v) | ||
85 | { | ||
86 | return atomic_sub_and_test(i, &v->atomic_var); | ||
87 | } | ||
88 | |||
84 | static inline int __nvgpu_atomic_add_return(int i, nvgpu_atomic_t *v) | 89 | static inline int __nvgpu_atomic_add_return(int i, nvgpu_atomic_t *v) |
85 | { | 90 | { |
86 | return atomic_add_return(i, &v->atomic_var); | 91 | return atomic_add_return(i, &v->atomic_var); |
87 | } | 92 | } |
88 | 93 | ||
94 | static inline int __nvgpu_atomic_add_unless(nvgpu_atomic_t *v, int a, int u) | ||
95 | { | ||
96 | return atomic_add_unless(&v->atomic_var, a, u); | ||
97 | } | ||
98 | |||
89 | static inline void __nvgpu_atomic64_set(nvgpu_atomic64_t *v, long i) | 99 | static inline void __nvgpu_atomic64_set(nvgpu_atomic64_t *v, long i) |
90 | { | 100 | { |
91 | atomic64_set(&v->atomic_var, i); | 101 | atomic64_set(&v->atomic_var, i); |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h index 90261d81..5c0019ae 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/semaphore.h +++ b/drivers/gpu/nvgpu/include/nvgpu/semaphore.h | |||
@@ -73,7 +73,7 @@ struct nvgpu_semaphore { | |||
73 | nvgpu_atomic_t value; | 73 | nvgpu_atomic_t value; |
74 | int incremented; | 74 | int incremented; |
75 | 75 | ||
76 | struct kref ref; | 76 | struct nvgpu_ref ref; |
77 | }; | 77 | }; |
78 | 78 | ||
79 | /* | 79 | /* |
@@ -106,7 +106,7 @@ struct nvgpu_semaphore_pool { | |||
106 | * done waiting on it. This ref count ensures that the pool doesn't | 106 | * done waiting on it. This ref count ensures that the pool doesn't |
107 | * go away until all semaphores using this pool are cleaned up first. | 107 | * go away until all semaphores using this pool are cleaned up first. |
108 | */ | 108 | */ |
109 | struct kref ref; | 109 | struct nvgpu_ref ref; |
110 | }; | 110 | }; |
111 | 111 | ||
112 | static inline struct nvgpu_semaphore_pool * | 112 | static inline struct nvgpu_semaphore_pool * |
diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h index 255b4361..b5c64c99 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/vm.h +++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h | |||
@@ -88,7 +88,7 @@ struct nvgpu_mapped_buf { | |||
88 | u64 size; | 88 | u64 size; |
89 | struct dma_buf *dmabuf; | 89 | struct dma_buf *dmabuf; |
90 | struct sg_table *sgt; | 90 | struct sg_table *sgt; |
91 | struct kref ref; | 91 | struct nvgpu_ref ref; |
92 | u32 user_mapped; | 92 | u32 user_mapped; |
93 | bool own_mem_ref; | 93 | bool own_mem_ref; |
94 | u32 pgsz_idx; | 94 | u32 pgsz_idx; |
@@ -142,7 +142,7 @@ struct vm_gk20a { | |||
142 | 142 | ||
143 | const struct gk20a_mmu_level *mmu_levels; | 143 | const struct gk20a_mmu_level *mmu_levels; |
144 | 144 | ||
145 | struct kref ref; | 145 | struct nvgpu_ref ref; |
146 | 146 | ||
147 | struct nvgpu_mutex update_gmmu_lock; | 147 | struct nvgpu_mutex update_gmmu_lock; |
148 | 148 | ||
diff --git a/drivers/gpu/nvgpu/vgpu/vgpu.c b/drivers/gpu/nvgpu/vgpu/vgpu.c index 8978ca90..8f401ec6 100644 --- a/drivers/gpu/nvgpu/vgpu/vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/vgpu.c | |||
@@ -767,7 +767,7 @@ int vgpu_probe(struct platform_device *pdev) | |||
767 | vgpu_create_sysfs(dev); | 767 | vgpu_create_sysfs(dev); |
768 | gk20a_init_gr(gk20a); | 768 | gk20a_init_gr(gk20a); |
769 | 769 | ||
770 | kref_init(&gk20a->refcount); | 770 | nvgpu_ref_init(&gk20a->refcount); |
771 | 771 | ||
772 | return 0; | 772 | return 0; |
773 | } | 773 | } |