Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/mm_gk20a.c')
 drivers/gpu/nvgpu/gk20a/mm_gk20a.c (-rw-r--r--) | 184
 1 file changed, 92 insertions(+), 92 deletions(-)
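The change is mechanical: every raw Linux locking call in mm_gk20a.c (mutex_init/mutex_lock/mutex_unlock, spin_lock_init/spin_lock/spin_unlock, and the struct mutex member) is swapped for nvgpu's OS-abstraction wrappers (struct nvgpu_mutex, nvgpu_mutex_init/acquire/release, nvgpu_spinlock_init/acquire/release). As a minimal sketch of the resulting pattern, not code from the driver: only the nvgpu_* names are taken from the hunks below; the header path and the demo structure are assumptions.

/* Hypothetical sketch of the wrapper usage; the <nvgpu/lock.h> header path
 * and struct demo_state are assumptions, the nvgpu_* calls mirror the diff. */
#include <nvgpu/lock.h>

struct demo_state {
	struct nvgpu_mutex lock;        /* replaces struct mutex */
	struct nvgpu_spinlock win_lock; /* replaces a raw spinlock */
	int window;
};

static void demo_init(struct demo_state *s)
{
	nvgpu_mutex_init(&s->lock);        /* was mutex_init() */
	nvgpu_spinlock_init(&s->win_lock); /* was spin_lock_init() */
}

static void demo_set_window(struct demo_state *s, int win)
{
	nvgpu_spinlock_acquire(&s->win_lock); /* was spin_lock() */
	s->window = win;
	nvgpu_spinlock_release(&s->win_lock); /* was spin_unlock() */
}

static void demo_locked_op(struct demo_state *s)
{
	nvgpu_mutex_acquire(&s->lock); /* was mutex_lock() */
	/* ... critical section ... */
	nvgpu_mutex_release(&s->lock); /* was mutex_unlock() */
}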
diff --git a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
index c62d1f6c..2539138a 100644
--- a/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
+++ b/drivers/gpu/nvgpu/gk20a/mm_gk20a.c
@@ -142,7 +142,7 @@ static u32 gk20a_pramin_enter(struct gk20a *g, struct mem_desc *mem,
 
 	WARN_ON(!bufbase);
 
-	spin_lock(&g->mm.pramin_window_lock);
+	nvgpu_spinlock_acquire(&g->mm.pramin_window_lock);
 
 	if (g->mm.pramin_window != win) {
 		gk20a_writel(g, bus_bar0_window_r(), win);
@@ -158,7 +158,7 @@ static void gk20a_pramin_exit(struct gk20a *g, struct mem_desc *mem,
 {
 	gk20a_dbg(gpu_dbg_mem, "end for %p,%p", mem, chunk);
 
-	spin_unlock(&g->mm.pramin_window_lock);
+	nvgpu_spinlock_release(&g->mm.pramin_window_lock);
 }
 
 /*
@@ -483,7 +483,7 @@ static int __must_check gk20a_init_ce_vm(struct mm_gk20a *mm);
 static struct gk20a *gk20a_vidmem_buf_owner(struct dma_buf *dmabuf);
 
 struct gk20a_dmabuf_priv {
-	struct mutex lock;
+	struct nvgpu_mutex lock;
 
 	struct gk20a_comptag_allocator *comptag_allocator;
 	struct gk20a_comptags comptags;
@@ -514,7 +514,7 @@ static int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator,
 	unsigned long addr;
 	int err = 0;
 
-	mutex_lock(&allocator->lock);
+	nvgpu_mutex_acquire(&allocator->lock);
 	addr = bitmap_find_next_zero_area(allocator->bitmap, allocator->size,
 			0, len, 0);
 	if (addr < allocator->size) {
@@ -524,7 +524,7 @@ static int gk20a_comptaglines_alloc(struct gk20a_comptag_allocator *allocator,
 	} else {
 		err = -ENOMEM;
 	}
-	mutex_unlock(&allocator->lock);
+	nvgpu_mutex_release(&allocator->lock);
 
 	return err;
 }
@@ -538,9 +538,9 @@ static void gk20a_comptaglines_free(struct gk20a_comptag_allocator *allocator,
 	WARN_ON(addr > allocator->size);
 	WARN_ON(addr + len > allocator->size);
 
-	mutex_lock(&allocator->lock);
+	nvgpu_mutex_acquire(&allocator->lock);
 	bitmap_clear(allocator->bitmap, addr, len);
-	mutex_unlock(&allocator->lock);
+	nvgpu_mutex_release(&allocator->lock);
 }
 
 static void gk20a_mm_delete_priv(void *_priv)
@@ -575,12 +575,12 @@ struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf)
 	if (WARN_ON(!priv))
 		return ERR_PTR(-EINVAL);
 
-	mutex_lock(&priv->lock);
+	nvgpu_mutex_acquire(&priv->lock);
 
 	if (priv->pin_count == 0) {
 		priv->attach = dma_buf_attach(dmabuf, dev);
 		if (IS_ERR(priv->attach)) {
-			mutex_unlock(&priv->lock);
+			nvgpu_mutex_release(&priv->lock);
 			return (struct sg_table *)priv->attach;
 		}
 
@@ -588,13 +588,13 @@ struct sg_table *gk20a_mm_pin(struct device *dev, struct dma_buf *dmabuf)
 						   DMA_BIDIRECTIONAL);
 		if (IS_ERR(priv->sgt)) {
 			dma_buf_detach(dmabuf, priv->attach);
-			mutex_unlock(&priv->lock);
+			nvgpu_mutex_release(&priv->lock);
 			return priv->sgt;
 		}
 	}
 
 	priv->pin_count++;
-	mutex_unlock(&priv->lock);
+	nvgpu_mutex_release(&priv->lock);
 	return priv->sgt;
 }
 
@@ -607,7 +607,7 @@ void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
 	if (IS_ERR(priv) || !priv)
 		return;
 
-	mutex_lock(&priv->lock);
+	nvgpu_mutex_acquire(&priv->lock);
 	WARN_ON(priv->sgt != sgt);
 	priv->pin_count--;
 	WARN_ON(priv->pin_count < 0);
@@ -617,7 +617,7 @@ void gk20a_mm_unpin(struct device *dev, struct dma_buf *dmabuf,
 					 DMA_BIDIRECTIONAL);
 		dma_buf_detach(dmabuf, priv->attach);
 	}
-	mutex_unlock(&priv->lock);
+	nvgpu_mutex_release(&priv->lock);
 }
 
 void gk20a_get_comptags(struct device *dev, struct dma_buf *dmabuf,
@@ -842,7 +842,7 @@ static int gk20a_alloc_sysmem_flush(struct gk20a *g)
 static void gk20a_init_pramin(struct mm_gk20a *mm)
 {
 	mm->pramin_window = 0;
-	spin_lock_init(&mm->pramin_window_lock);
+	nvgpu_spinlock_init(&mm->pramin_window_lock);
 	mm->force_pramin = GK20A_FORCE_PRAMIN_DEFAULT;
 }
 
@@ -971,12 +971,12 @@ static int gk20a_init_vidmem(struct mm_gk20a *mm)
 	mm->vidmem.bootstrap_base = bootstrap_base;
 	mm->vidmem.bootstrap_size = bootstrap_size;
 
-	mutex_init(&mm->vidmem.first_clear_mutex);
+	nvgpu_mutex_init(&mm->vidmem.first_clear_mutex);
 
 	INIT_WORK(&mm->vidmem.clear_mem_worker, gk20a_vidmem_clear_mem_worker);
 	atomic64_set(&mm->vidmem.bytes_pending, 0);
 	INIT_LIST_HEAD(&mm->vidmem.clear_list_head);
-	mutex_init(&mm->vidmem.clear_list_mutex);
+	nvgpu_mutex_init(&mm->vidmem.clear_list_mutex);
 
 	gk20a_dbg_info("registered vidmem: %zu MB", size / SZ_1M);
 
@@ -998,7 +998,7 @@ int gk20a_init_mm_setup_sw(struct gk20a *g)
 	}
 
 	mm->g = g;
-	mutex_init(&mm->l2_op_lock);
+	nvgpu_mutex_init(&mm->l2_op_lock);
 
 	/*TBD: make channel vm size configurable */
 	mm->channel.user_size = NV_MM_DEFAULT_USER_SIZE -
@@ -1484,12 +1484,12 @@ int gk20a_vm_get_buffers(struct vm_gk20a *vm,
 		return 0;
 	}
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
 	buffer_list = nvgpu_kalloc(sizeof(*buffer_list) *
 				   vm->num_user_mapped_buffers, true);
 	if (!buffer_list) {
-		mutex_unlock(&vm->update_gmmu_lock);
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		return -ENOMEM;
 	}
 
@@ -1510,7 +1510,7 @@ int gk20a_vm_get_buffers(struct vm_gk20a *vm,
 	*num_buffers = vm->num_user_mapped_buffers;
 	*mapped_buffers = buffer_list;
 
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 	return 0;
 }
@@ -1544,9 +1544,9 @@ void gk20a_vm_mapping_batch_finish_locked(
 void gk20a_vm_mapping_batch_finish(struct vm_gk20a *vm,
 				   struct vm_gk20a_mapping_batch *mapping_batch)
 {
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	gk20a_vm_mapping_batch_finish_locked(vm, mapping_batch);
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
 void gk20a_vm_put_buffers(struct vm_gk20a *vm,
@@ -1559,7 +1559,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm,
 	if (num_buffers == 0)
 		return;
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	gk20a_vm_mapping_batch_start(&batch);
 	vm->kref_put_batch = &batch;
 
@@ -1569,7 +1569,7 @@ void gk20a_vm_put_buffers(struct vm_gk20a *vm,
 
 	vm->kref_put_batch = NULL;
 	gk20a_vm_mapping_batch_finish_locked(vm, &batch);
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 	nvgpu_kfree(mapped_buffers);
 }
@@ -1581,17 +1581,17 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 	int retries = 10000; /* 50 ms */
 	struct mapped_buffer_node *mapped_buffer;
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
 	mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, offset);
 	if (!mapped_buffer) {
-		mutex_unlock(&vm->update_gmmu_lock);
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
 		return;
 	}
 
 	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
-		mutex_unlock(&vm->update_gmmu_lock);
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 		while (retries >= 0 || !tegra_platform_is_silicon()) {
 			if (atomic_read(&mapped_buffer->ref.refcount) == 1)
@@ -1602,11 +1602,11 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 		if (retries < 0 && tegra_platform_is_silicon())
 			gk20a_err(d, "sync-unmap failed on 0x%llx",
 					offset);
-		mutex_lock(&vm->update_gmmu_lock);
+		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	}
 
 	if (mapped_buffer->user_mapped == 0) {
-		mutex_unlock(&vm->update_gmmu_lock);
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		gk20a_err(d, "addr already unmapped from user 0x%llx", offset);
 		return;
 	}
@@ -1619,7 +1619,7 @@ static void gk20a_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 	kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref);
 	vm->kref_put_batch = NULL;
 
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
 u64 gk20a_vm_alloc_va(struct vm_gk20a *vm,
@@ -2239,7 +2239,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 	buf->g = g;
 
 	if (!g->mm.vidmem.cleared) {
-		mutex_lock(&g->mm.vidmem.first_clear_mutex);
+		nvgpu_mutex_acquire(&g->mm.vidmem.first_clear_mutex);
 		if (!g->mm.vidmem.cleared) {
 			err = gk20a_vidmem_clear_all(g);
 			if (err) {
@@ -2248,7 +2248,7 @@ int gk20a_vidmem_buf_alloc(struct gk20a *g, size_t bytes)
 				goto err_kfree;
 			}
 		}
-		mutex_unlock(&g->mm.vidmem.first_clear_mutex);
+		nvgpu_mutex_release(&g->mm.vidmem.first_clear_mutex);
 	}
 
 	buf->mem = kzalloc(sizeof(struct mem_desc), GFP_KERNEL);
@@ -2301,10 +2301,10 @@ int gk20a_vidmem_get_space(struct gk20a *g, u64 *space)
 	if (!nvgpu_alloc_initialized(allocator))
 		return -ENOSYS;
 
-	mutex_lock(&g->mm.vidmem.clear_list_mutex);
+	nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
 	*space = nvgpu_alloc_space(allocator) +
 		atomic64_read(&g->mm.vidmem.bytes_pending);
-	mutex_unlock(&g->mm.vidmem.clear_list_mutex);
+	nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
 	return 0;
 #else
 	return -ENOSYS;
@@ -2425,7 +2425,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 		return -EFAULT;
 	}
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
 	/* check if this buffer is already mapped */
 	if (!vm->userspace_managed) {
@@ -2434,7 +2434,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 						   flags, kind, sgt,
 						   user_mapped, rw_flag);
 		if (map_offset) {
-			mutex_unlock(&vm->update_gmmu_lock);
+			nvgpu_mutex_release(&vm->update_gmmu_lock);
 			return map_offset;
 		}
 	}
@@ -2627,7 +2627,7 @@ u64 gk20a_vm_map(struct vm_gk20a *vm,
 		mapped_buffer->va_node = va_node;
 	}
 
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 	return map_offset;
 
@@ -2643,7 +2643,7 @@ clean_up:
 	if (!IS_ERR(bfr.sgt))
 		gk20a_mm_unpin(d, dmabuf, bfr.sgt);
 
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 	gk20a_dbg_info("err=%d\n", err);
 	return 0;
 }
@@ -2658,13 +2658,13 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm,
 	struct mapped_buffer_node *mapped_buffer;
 	struct device *d = dev_from_vm(vm);
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
 	mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, mapping_gva);
 
 	if (!mapped_buffer || !mapped_buffer->user_mapped)
 	{
-		mutex_unlock(&vm->update_gmmu_lock);
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva);
 		return -EFAULT;
 	}
@@ -2685,7 +2685,7 @@ int gk20a_vm_get_compbits_info(struct vm_gk20a *vm,
 		*mapping_ctagline = mapped_buffer->ctag_offset;
 	}
 
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 	return 0;
 }
 
@@ -2716,19 +2716,19 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 		return -EFAULT;
 	}
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
 	mapped_buffer =
 		find_mapped_buffer_locked(&vm->mapped_buffers, mapping_gva);
 
 	if (!mapped_buffer || !mapped_buffer->user_mapped) {
-		mutex_unlock(&vm->update_gmmu_lock);
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		gk20a_err(d, "%s: bad offset 0x%llx", __func__, mapping_gva);
 		return -EFAULT;
 	}
 
 	if (!mapped_buffer->ctags_mappable) {
-		mutex_unlock(&vm->update_gmmu_lock);
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		gk20a_err(d, "%s: comptags not mappable, offset 0x%llx",
 			  __func__, mapping_gva);
 		return -EFAULT;
@@ -2747,7 +2747,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 		u64 cacheline_offset_start;
 
 		if (!mapped_buffer->ctag_map_win_size) {
-			mutex_unlock(&vm->update_gmmu_lock);
+			nvgpu_mutex_release(&vm->update_gmmu_lock);
 			gk20a_err(d,
 				  "%s: mapping 0x%llx does not have "
 				  "mappable comptags",
@@ -2774,7 +2774,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 				mapped_buffer->ctag_map_win_size, &va_node);
 
 			if (err) {
-				mutex_unlock(&vm->update_gmmu_lock);
+				nvgpu_mutex_release(&vm->update_gmmu_lock);
 				return err;
 			}
 
@@ -2783,7 +2783,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 				 * pointer if the space is freed
 				 * before before the buffer is
 				 * unmapped */
-				mutex_unlock(&vm->update_gmmu_lock);
+				nvgpu_mutex_release(&vm->update_gmmu_lock);
 				gk20a_err(d,
 					  "%s: comptags cannot be mapped into allocated space",
 					  __func__);
@@ -2810,7 +2810,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 			g->gr.compbit_store.mem.aperture);
 
 		if (!mapped_buffer->ctag_map_win_addr) {
-			mutex_unlock(&vm->update_gmmu_lock);
+			nvgpu_mutex_release(&vm->update_gmmu_lock);
 			gk20a_err(d,
 				  "%s: failed to map comptags for mapping 0x%llx",
 				  __func__, mapping_gva);
@@ -2818,7 +2818,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 		}
 	} else if (fixed_mapping && *compbits_win_gva &&
 		   mapped_buffer->ctag_map_win_addr != *compbits_win_gva) {
-		mutex_unlock(&vm->update_gmmu_lock);
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		gk20a_err(d,
 			  "%s: re-requesting comptags map into mismatching address. buffer offset 0x"
 			  "%llx, existing comptag map at 0x%llx, requested remap 0x%llx",
@@ -2830,7 +2830,7 @@ int gk20a_vm_map_compbits(struct vm_gk20a *vm,
 	*mapping_iova = gk20a_mm_iova_addr(g, mapped_buffer->sgt->sgl, 0);
 	*compbits_win_gva = mapped_buffer->ctag_map_win_addr;
 
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 	return 0;
 }
@@ -2852,7 +2852,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
 	struct gk20a *g = gk20a_from_vm(vm);
 	u64 vaddr;
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	vaddr = g->ops.mm.gmmu_map(vm, addr,
 				   *sgt, /* sg table */
 				   0, /* sg offset */
@@ -2866,7 +2866,7 @@ static u64 __gk20a_gmmu_map(struct vm_gk20a *vm,
 				   priv, /* priv */
 				   NULL, /* mapping_batch handle */
 				   aperture);
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 	if (!vaddr) {
 		gk20a_err(dev_from_vm(vm), "failed to allocate va space");
 		return 0;
@@ -3128,10 +3128,10 @@ int gk20a_gmmu_alloc_attr_vid_at(struct gk20a *g, enum dma_attr attr,
 	 * are not done anyway */
 	WARN_ON(attr != 0 && attr != DMA_ATTR_NO_KERNEL_MAPPING);
 
-	mutex_lock(&g->mm.vidmem.clear_list_mutex);
+	nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
 	before_pending = atomic64_read(&g->mm.vidmem.bytes_pending);
 	addr = __gk20a_gmmu_alloc(vidmem_alloc, at, size);
-	mutex_unlock(&g->mm.vidmem.clear_list_mutex);
+	nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
 	if (!addr) {
 		/*
 		 * If memory is known to be freed soon, let the user know that
@@ -3188,12 +3188,12 @@ static void gk20a_gmmu_free_attr_vid(struct gk20a *g, enum dma_attr attr,
 	bool was_empty;
 
 	if (mem->user_mem) {
-		mutex_lock(&g->mm.vidmem.clear_list_mutex);
+		nvgpu_mutex_acquire(&g->mm.vidmem.clear_list_mutex);
 		was_empty = list_empty(&g->mm.vidmem.clear_list_head);
 		list_add_tail(&mem->clear_list_entry,
 			      &g->mm.vidmem.clear_list_head);
 		atomic64_add(mem->size, &g->mm.vidmem.bytes_pending);
-		mutex_unlock(&g->mm.vidmem.clear_list_mutex);
+		nvgpu_mutex_release(&g->mm.vidmem.clear_list_mutex);
 
 		if (was_empty) {
 			cancel_work_sync(&g->mm.vidmem.clear_mem_worker);
@@ -3258,12 +3258,12 @@ static struct mem_desc *get_pending_mem_desc(struct mm_gk20a *mm)
 {
 	struct mem_desc *mem = NULL;
 
-	mutex_lock(&mm->vidmem.clear_list_mutex);
+	nvgpu_mutex_acquire(&mm->vidmem.clear_list_mutex);
 	mem = list_first_entry_or_null(&mm->vidmem.clear_list_head,
 			struct mem_desc, clear_list_entry);
 	if (mem)
 		list_del_init(&mem->clear_list_entry);
-	mutex_unlock(&mm->vidmem.clear_list_mutex);
+	nvgpu_mutex_release(&mm->vidmem.clear_list_mutex);
 
 	return mem;
 }
@@ -3409,12 +3409,12 @@ dma_addr_t gk20a_mm_gpuva_to_iova_base(struct vm_gk20a *vm, u64 gpu_vaddr)
 	dma_addr_t addr = 0;
 	struct gk20a *g = gk20a_from_vm(vm);
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	buffer = find_mapped_buffer_locked(&vm->mapped_buffers, gpu_vaddr);
 	if (buffer)
 		addr = g->ops.mm.get_iova_addr(g, buffer->sgt->sgl,
 				buffer->flags);
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 	return addr;
 }
@@ -3426,7 +3426,7 @@ void gk20a_gmmu_unmap(struct vm_gk20a *vm,
 {
 	struct gk20a *g = gk20a_from_vm(vm);
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	g->ops.mm.gmmu_unmap(vm,
 			vaddr,
 			size,
@@ -3435,7 +3435,7 @@ void gk20a_gmmu_unmap(struct vm_gk20a *vm,
 			rw_flag,
 			false,
 			NULL);
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
 phys_addr_t gk20a_get_phys_from_iova(struct device *d,
@@ -4053,16 +4053,16 @@ void gk20a_vm_unmap(struct vm_gk20a *vm, u64 offset)
 	struct device *d = dev_from_vm(vm);
 	struct mapped_buffer_node *mapped_buffer;
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	mapped_buffer = find_mapped_buffer_locked(&vm->mapped_buffers, offset);
 	if (!mapped_buffer) {
-		mutex_unlock(&vm->update_gmmu_lock);
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		gk20a_err(d, "invalid addr to unmap 0x%llx", offset);
 		return;
 	}
 
 	kref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_kref);
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
 static void gk20a_vm_free_entries(struct vm_gk20a *vm,
@@ -4101,7 +4101,7 @@ static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm)
 		}
 	}
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
 	/* TBD: add a flag here for the unmap code to recognize teardown
 	 * and short-circuit any otherwise expensive operations. */
@@ -4123,7 +4123,7 @@ static void gk20a_vm_remove_support_nofree(struct vm_gk20a *vm)
 
 	gk20a_deinit_vm(vm);
 
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
 
 void gk20a_vm_remove_support(struct vm_gk20a *vm)
@@ -4547,7 +4547,7 @@ int gk20a_init_vm(struct mm_gk20a *mm,
 
 	vm->mapped_buffers = RB_ROOT;
 
-	mutex_init(&vm->update_gmmu_lock);
+	nvgpu_mutex_init(&vm->update_gmmu_lock);
 	kref_init(&vm->ref);
 	INIT_LIST_HEAD(&vm->reserved_va_list);
 
@@ -4696,7 +4696,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 	INIT_LIST_HEAD(&va_node->va_buffers_list);
 	INIT_LIST_HEAD(&va_node->reserved_va_list);
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
 	/* mark that we need to use sparse mappings here */
 	if (args->flags & NVGPU_AS_ALLOC_SPACE_FLAGS_SPARSE) {
@@ -4715,7 +4715,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 						 NULL,
 						 APERTURE_INVALID);
 		if (!map_offset) {
-			mutex_unlock(&vm->update_gmmu_lock);
+			nvgpu_mutex_release(&vm->update_gmmu_lock);
 			nvgpu_free(vma, vaddr_start);
 			kfree(va_node);
 			goto clean_up;
@@ -4725,7 +4725,7 @@ int gk20a_vm_alloc_space(struct gk20a_as_share *as_share,
 	}
 	list_add_tail(&va_node->reserved_va_list, &vm->reserved_va_list);
 
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 	args->o_a.offset = vaddr_start;
 	err = 0;
@@ -4754,7 +4754,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 	vma = vm->vma[pgsz_idx];
 	nvgpu_free(vma, args->offset);
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 	va_node = addr_to_reservation(vm, args->offset);
 	if (va_node) {
 		struct mapped_buffer_node *buffer, *n;
@@ -4782,7 +4782,7 @@ int gk20a_vm_free_space(struct gk20a_as_share *as_share,
 				NULL);
 		kfree(va_node);
 	}
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 	err = 0;
 
 	return err;
@@ -4819,7 +4819,7 @@ int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev)
 	if (likely(priv))
 		return 0;
 
-	mutex_lock(&priv_lock);
+	nvgpu_mutex_acquire(&priv_lock);
 	priv = dma_buf_get_drvdata(dmabuf, dev);
 	if (priv)
 		goto priv_exist_or_err;
@@ -4828,12 +4828,12 @@ int gk20a_dmabuf_alloc_drvdata(struct dma_buf *dmabuf, struct device *dev)
 		priv = ERR_PTR(-ENOMEM);
 		goto priv_exist_or_err;
 	}
-	mutex_init(&priv->lock);
+	nvgpu_mutex_init(&priv->lock);
 	INIT_LIST_HEAD(&priv->states);
 	priv->buffer_id = ++priv_count;
 	dma_buf_set_drvdata(dmabuf, dev, priv, gk20a_mm_delete_priv);
priv_exist_or_err:
-	mutex_unlock(&priv_lock);
+	nvgpu_mutex_release(&priv_lock);
 	if (IS_ERR(priv))
 		return -ENOMEM;
 
@@ -4858,7 +4858,7 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct device *dev,
 	if (WARN_ON(!priv))
 		return -ENOSYS;
 
-	mutex_lock(&priv->lock);
+	nvgpu_mutex_acquire(&priv->lock);
 
 	list_for_each_entry(s, &priv->states, list)
 		if (s->offset == offset)
@@ -4873,11 +4873,11 @@ int gk20a_dmabuf_get_state(struct dma_buf *dmabuf, struct device *dev,
 
 	s->offset = offset;
 	INIT_LIST_HEAD(&s->list);
-	mutex_init(&s->lock);
+	nvgpu_mutex_init(&s->lock);
 	list_add_tail(&s->list, &priv->states);
 
out:
-	mutex_unlock(&priv->lock);
+	nvgpu_mutex_release(&priv->lock);
 	if (!err)
 		*state = s;
 	return err;
@@ -5152,7 +5152,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 
 	nvgpu_timeout_init(g, &timeout, 100, NVGPU_TIMER_RETRY_TIMER);
 
-	mutex_lock(&mm->l2_op_lock);
+	nvgpu_mutex_acquire(&mm->l2_op_lock);
 
 	/* Make sure all previous writes are committed to the L2. There's no
 	   guarantee that writes are to DRAM. This will be a sysmembar internal
@@ -5184,7 +5184,7 @@ int gk20a_mm_fb_flush(struct gk20a *g)
 
 	trace_gk20a_mm_fb_flush_done(dev_name(g->dev));
 
-	mutex_unlock(&mm->l2_op_lock);
+	nvgpu_mutex_release(&mm->l2_op_lock);
 
 	pm_runtime_put_noidle(g->dev);
 
@@ -5231,9 +5231,9 @@ void gk20a_mm_l2_invalidate(struct gk20a *g)
 	struct mm_gk20a *mm = &g->mm;
 	gk20a_busy_noresume(g->dev);
 	if (g->power_on) {
-		mutex_lock(&mm->l2_op_lock);
+		nvgpu_mutex_acquire(&mm->l2_op_lock);
 		gk20a_mm_l2_invalidate_locked(g);
-		mutex_unlock(&mm->l2_op_lock);
+		nvgpu_mutex_release(&mm->l2_op_lock);
 	}
 	pm_runtime_put_noidle(g->dev);
 }
@@ -5252,7 +5252,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 
 	nvgpu_timeout_init(g, &timeout, 2000, NVGPU_TIMER_RETRY_TIMER);
 
-	mutex_lock(&mm->l2_op_lock);
+	nvgpu_mutex_acquire(&mm->l2_op_lock);
 
 	trace_gk20a_mm_l2_flush(dev_name(g->dev));
 
@@ -5280,7 +5280,7 @@ void gk20a_mm_l2_flush(struct gk20a *g, bool invalidate)
 	if (invalidate)
 		gk20a_mm_l2_invalidate_locked(g);
 
-	mutex_unlock(&mm->l2_op_lock);
+	nvgpu_mutex_release(&mm->l2_op_lock);
 
hw_was_off:
 	pm_runtime_put_noidle(g->dev);
@@ -5300,7 +5300,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
 
 	nvgpu_timeout_init(g, &timeout, 200, NVGPU_TIMER_RETRY_TIMER);
 
-	mutex_lock(&mm->l2_op_lock);
+	nvgpu_mutex_acquire(&mm->l2_op_lock);
 
 	/* Flush all dirty lines from the CBC to L2 */
 	gk20a_writel(g, flush_l2_clean_comptags_r(),
@@ -5320,7 +5320,7 @@ void gk20a_mm_cbc_clean(struct gk20a *g)
 	} while (!nvgpu_timeout_expired_msg(&timeout,
 			"l2_clean_comptags too many retries"));
 
-	mutex_unlock(&mm->l2_op_lock);
+	nvgpu_mutex_release(&mm->l2_op_lock);
 
hw_was_off:
 	pm_runtime_put_noidle(g->dev);
@@ -5334,19 +5334,19 @@ int gk20a_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
 
 	gk20a_dbg_fn("gpu_va=0x%llx", gpu_va);
 
-	mutex_lock(&vm->update_gmmu_lock);
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
 
 	mapped_buffer = find_mapped_buffer_range_locked(&vm->mapped_buffers,
 							gpu_va);
 	if (!mapped_buffer) {
-		mutex_unlock(&vm->update_gmmu_lock);
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
 		return -EINVAL;
 	}
 
 	*dmabuf = mapped_buffer->dmabuf;
 	*offset = gpu_va - mapped_buffer->addr;
 
-	mutex_unlock(&vm->update_gmmu_lock);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
 
 	return 0;
 }
@@ -5373,7 +5373,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
 
 	addr_lo = u64_lo32(gk20a_mem_get_base_addr(g, &vm->pdb.mem, 0) >> 12);
 
-	mutex_lock(&tlb_lock);
+	nvgpu_mutex_acquire(&tlb_lock);
 
 	trace_gk20a_mm_tlb_invalidate(dev_name(g->dev));
 
@@ -5414,7 +5414,7 @@ void gk20a_mm_tlb_invalidate(struct vm_gk20a *vm)
 	trace_gk20a_mm_tlb_invalidate_done(dev_name(g->dev));
 
out:
-	mutex_unlock(&tlb_lock);
+	nvgpu_mutex_release(&tlb_lock);
 }
 
 int gk20a_mm_suspend(struct gk20a *g)