Diffstat (limited to 'drivers/gpu/nvgpu/common')
-rw-r--r--   drivers/gpu/nvgpu/common/linux/vm.c      | 202
-rw-r--r--   drivers/gpu/nvgpu/common/linux/vm_priv.h |  10
-rw-r--r--   drivers/gpu/nvgpu/common/mm/vm.c         | 134
-rw-r--r--   drivers/gpu/nvgpu/common/mm/vm_area.c    |   2
4 files changed, 339 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c
index b686d616..638d3e51 100644
--- a/drivers/gpu/nvgpu/common/linux/vm.c
+++ b/drivers/gpu/nvgpu/common/linux/vm.c
@@ -115,6 +115,108 @@ static u64 nvgpu_get_buffer_alignment(struct gk20a *g, struct scatterlist *sgl,
 	return align;
 }
 
+static int setup_kind_legacy(struct vm_gk20a *vm, struct buffer_attrs *bfr,
+			     bool *pkind_compressible)
+{
+	struct gk20a *g = gk20a_from_vm(vm);
+	bool kind_compressible;
+
+	if (unlikely(bfr->kind_v == g->ops.mm.get_kind_invalid()))
+		bfr->kind_v = g->ops.mm.get_kind_pitch();
+
+	if (unlikely(!gk20a_kind_is_supported(bfr->kind_v))) {
+		nvgpu_err(g, "kind 0x%x not supported", bfr->kind_v);
+		return -EINVAL;
+	}
+
+	bfr->uc_kind_v = g->ops.mm.get_kind_invalid();
+	/* find a suitable incompressible kind if it becomes necessary later */
+	kind_compressible = gk20a_kind_is_compressible(bfr->kind_v);
+	if (kind_compressible) {
+		bfr->uc_kind_v = gk20a_get_uncompressed_kind(bfr->kind_v);
+		if (unlikely(bfr->uc_kind_v == g->ops.mm.get_kind_invalid())) {
+			/* shouldn't happen, but it is worth cross-checking */
+			nvgpu_err(g, "comptag kind 0x%x can't be"
+				  " downgraded to uncompressed kind",
+				  bfr->kind_v);
+			return -EINVAL;
+		}
+	}
+
+	*pkind_compressible = kind_compressible;
+	return 0;
+}
+
+static int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
+					     u32 flags,
+					     struct buffer_attrs *bfr,
+					     enum gmmu_pgsz_gk20a pgsz_idx)
+{
+	bool kind_compressible;
+	struct gk20a *g = gk20a_from_vm(vm);
+	int ctag_granularity = g->ops.fb.compression_page_size(g);
+
+	if (!bfr->use_kind_v)
+		bfr->kind_v = g->ops.mm.get_kind_invalid();
+	if (!bfr->use_uc_kind_v)
+		bfr->uc_kind_v = g->ops.mm.get_kind_invalid();
+
+	if (flags & NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL) {
+		kind_compressible = (bfr->kind_v !=
+				     g->ops.mm.get_kind_invalid());
+		if (!kind_compressible)
+			bfr->kind_v = bfr->uc_kind_v;
+	} else {
+		int err = setup_kind_legacy(vm, bfr, &kind_compressible);
+
+		if (err)
+			return err;
+	}
+
+	/* comptags only supported for suitable kinds, 128KB pagesize */
+	if (kind_compressible &&
+	    vm->gmmu_page_sizes[pgsz_idx] <
+	    g->ops.fb.compressible_page_size(g)) {
+		/* it is safe to fall back to uncompressed as
+		   functionality is not harmed */
+		bfr->kind_v = bfr->uc_kind_v;
+		kind_compressible = false;
+	}
+	if (kind_compressible)
+		bfr->ctag_lines = DIV_ROUND_UP_ULL(bfr->size, ctag_granularity);
+	else
+		bfr->ctag_lines = 0;
+
+	bfr->use_kind_v = (bfr->kind_v != g->ops.mm.get_kind_invalid());
+	bfr->use_uc_kind_v = (bfr->uc_kind_v != g->ops.mm.get_kind_invalid());
+
+	return 0;
+}
+
+int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
+		      struct dma_buf **dmabuf,
+		      u64 *offset)
+{
+	struct nvgpu_mapped_buf *mapped_buffer;
+
+	gk20a_dbg_fn("gpu_va=0x%llx", gpu_va);
+
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+	mapped_buffer = __nvgpu_vm_find_mapped_buf_range(vm, gpu_va);
+	if (!mapped_buffer) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		return -EINVAL;
+	}
+
+	*dmabuf = mapped_buffer->dmabuf;
+	*offset = gpu_va - mapped_buffer->addr;
+
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+	return 0;
+}
+
 /*
  * vm->update_gmmu_lock must be held. This checks to see if we already have
  * mapped the passed buffer into this VM. If so, just return the existing
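The kind-selection code above falls back from a compressible kind to its uncompressed counterpart whenever the mapping's page size is below the hardware's compressible page size, and only then sizes the comptag allocation. A minimal standalone sketch of that decision, with the driver's HAL hooks (g->ops.mm / g->ops.fb) replaced by plain parameters; every name below is illustrative, not nvgpu API:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define KIND_INVALID 0xff  /* stand-in for g->ops.mm.get_kind_invalid() */

	/*
	 * Illustrative-only model of setup_buffer_kind_and_compression():
	 * drop to the uncompressed kind when the mapping page size is too
	 * small for comptags, then size the comptag allocation in units of
	 * the compression page.
	 */
	static uint32_t choose_kind(uint32_t kind, uint32_t uc_kind,
				    uint64_t map_page_size,
				    uint64_t comp_page_size,
				    uint64_t buf_size,
				    uint64_t ctag_granularity,
				    uint64_t *ctag_lines)
	{
		bool compressible = (kind != KIND_INVALID);

		if (compressible && map_page_size < comp_page_size) {
			kind = uc_kind;  /* safe: only compression is lost */
			compressible = false;
		}
		*ctag_lines = compressible ?
			(buf_size + ctag_granularity - 1) / ctag_granularity : 0;
		return kind;
	}

	int main(void)
	{
		uint64_t lines;
		/* 4 KiB pages cannot carry comptags when 128 KiB is required */
		uint32_t k = choose_kind(0x10, 0x20, 4096, 128 << 10,
					 1 << 20, 128 << 10, &lines);
		printf("kind=0x%x ctag_lines=%llu\n", k,
		       (unsigned long long)lines);
		return 0;
	}

The point of the fallback is that losing compression only costs performance, never correctness, which is why the driver downgrades silently instead of failing the map.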
@@ -478,6 +580,67 @@ clean_up:
 	return 0;
 }
 
+int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
+			int dmabuf_fd,
+			u64 *offset_align,
+			u32 flags, /*NVGPU_AS_MAP_BUFFER_FLAGS_*/
+			s16 compr_kind,
+			s16 incompr_kind,
+			u64 buffer_offset,
+			u64 mapping_size,
+			struct vm_gk20a_mapping_batch *batch)
+{
+	int err = 0;
+	struct dma_buf *dmabuf;
+	u64 ret_va;
+
+	gk20a_dbg_fn("");
+
+	/* get ref to the mem handle (released on unmap_locked) */
+	dmabuf = dma_buf_get(dmabuf_fd);
+	if (IS_ERR(dmabuf)) {
+		nvgpu_warn(gk20a_from_vm(vm), "%s: fd %d is not a dmabuf",
+			   __func__, dmabuf_fd);
+		return PTR_ERR(dmabuf);
+	}
+
+	/* verify that we're not overflowing the buffer, i.e.
+	 * (buffer_offset + mapping_size) > dmabuf->size.
+	 *
+	 * Since buffer_offset + mapping_size could overflow, first check
+	 * that mapping_size < dmabuf->size, at which point we can subtract
+	 * mapping_size from both sides for the final comparison.
+	 */
+	if ((mapping_size > dmabuf->size) ||
+	    (buffer_offset > (dmabuf->size - mapping_size))) {
+		nvgpu_err(gk20a_from_vm(vm),
+			  "buf size %llx < (offset(%llx) + map_size(%llx))\n",
+			  (u64)dmabuf->size, buffer_offset, mapping_size);
+		return -EINVAL;
+	}
+
+	err = gk20a_dmabuf_alloc_drvdata(dmabuf, dev_from_vm(vm));
+	if (err) {
+		dma_buf_put(dmabuf);
+		return err;
+	}
+
+	ret_va = nvgpu_vm_map(vm, dmabuf, *offset_align,
+			      flags, compr_kind, incompr_kind, true,
+			      gk20a_mem_flag_none,
+			      buffer_offset,
+			      mapping_size,
+			      batch);
+
+	*offset_align = ret_va;
+	if (!ret_va) {
+		dma_buf_put(dmabuf);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
 void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
 {
 	struct gk20a *g = vm->mm->g;
@@ -491,6 +654,43 @@ void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
 		return;
 	}
 
-	nvgpu_ref_put(&mapped_buffer->ref, gk20a_vm_unmap_locked_ref);
+	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
+
+/* NOTE! mapped_buffers lock must be held */
+void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer,
+			   struct vm_gk20a_mapping_batch *batch)
+{
+	struct vm_gk20a *vm = mapped_buffer->vm;
+	struct gk20a *g = vm->mm->g;
+
+	g->ops.mm.gmmu_unmap(vm,
+			     mapped_buffer->addr,
+			     mapped_buffer->size,
+			     mapped_buffer->pgsz_idx,
+			     mapped_buffer->va_allocated,
+			     gk20a_mem_flag_none,
+			     mapped_buffer->vm_area ?
+			     mapped_buffer->vm_area->sparse : false,
+			     batch);
+
+	gk20a_mm_unpin(dev_from_vm(vm), mapped_buffer->dmabuf,
+		       mapped_buffer->sgt);
+
+	/* remove from mapped buffer tree and remove list, free */
+	nvgpu_remove_mapped_buf(vm, mapped_buffer);
+	if (!nvgpu_list_empty(&mapped_buffer->buffer_list))
+		nvgpu_list_del(&mapped_buffer->buffer_list);
+
+	/* keep track of mapped buffers */
+	if (mapped_buffer->user_mapped)
+		vm->num_user_mapped_buffers--;
+
+	if (mapped_buffer->own_mem_ref)
+		dma_buf_put(mapped_buffer->dmabuf);
+
+	nvgpu_kfree(g, mapped_buffer);
+
+	return;
+}
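nvgpu_vm_map_buffer() above validates (buffer_offset, mapping_size) against dmabuf->size without ever forming buffer_offset + mapping_size, since that sum can wrap around a u64. The same idiom in isolation (plain C, no driver types; names are illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Returns true when [offset, offset + size) lies inside a buffer of
	 * buf_size bytes. Checking "size <= buf_size" first makes the
	 * subtraction safe, so "offset + size" is never evaluated and
	 * cannot overflow.
	 */
	static bool range_fits(uint64_t buf_size, uint64_t offset,
			       uint64_t size)
	{
		return size <= buf_size && offset <= buf_size - size;
	}

With buf_size = 16, offset = UINT64_MAX and size = 2, a naive offset + size check would wrap to 1 and wrongly pass; ordering the comparisons as above rejects it.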
diff --git a/drivers/gpu/nvgpu/common/linux/vm_priv.h b/drivers/gpu/nvgpu/common/linux/vm_priv.h
index fa173d59..be7efa8b 100644
--- a/drivers/gpu/nvgpu/common/linux/vm_priv.h
+++ b/drivers/gpu/nvgpu/common/linux/vm_priv.h
@@ -88,9 +88,9 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
 void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset);
 
 /* find buffer corresponding to va */
-int nvgpu_vm_find_buffer(struct vm_gk20a *vm, u64 gpu_va,
-			 struct dma_buf **dmabuf,
-			 u64 *offset);
+int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va,
+		      struct dma_buf **dmabuf,
+		      u64 *offset);
 
 enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g,
 					  struct dma_buf *dmabuf);
@@ -98,9 +98,5 @@ int validate_fixed_buffer(struct vm_gk20a *vm,
 			  struct buffer_attrs *bfr,
 			  u64 map_offset, u64 map_size,
 			  struct nvgpu_vm_area **pva_node);
-int setup_buffer_kind_and_compression(struct vm_gk20a *vm,
-				      u32 flags,
-				      struct buffer_attrs *bfr,
-				      enum gmmu_pgsz_gk20a pgsz_idx);
 
 #endif
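The renamed nvgpu_vm_find_buf() declared above translates an arbitrary GPU VA back to the dmabuf that backs it plus the byte offset into that buffer, under the VM's gmmu lock. A toy model of that reverse lookup over a flat table (the driver uses an rbtree via __nvgpu_vm_find_mapped_buf_range(); everything below is illustrative, not nvgpu API):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct toy_mapping {
		uint64_t addr;   /* GPU VA where the buffer is mapped */
		uint64_t size;   /* mapping size in bytes */
		int      buf_id; /* stand-in for the struct dma_buf pointer */
	};

	/* Linear-scan stand-in for the driver's rbtree range lookup. */
	static const struct toy_mapping *find_buf(const struct toy_mapping *t,
						  size_t n, uint64_t gpu_va,
						  uint64_t *offset)
	{
		for (size_t i = 0; i < n; i++) {
			if (gpu_va >= t[i].addr &&
			    gpu_va < t[i].addr + t[i].size) {
				/* byte offset into the backing buffer */
				*offset = gpu_va - t[i].addr;
				return &t[i];
			}
		}
		return NULL;
	}

	int main(void)
	{
		const struct toy_mapping table[] = {
			{ 0x100000, 0x10000, 1 },
			{ 0x200000, 0x40000, 2 },
		};
		uint64_t off;
		const struct toy_mapping *m = find_buf(table, 2, 0x212345, &off);
		if (m)
			printf("buf=%d offset=0x%llx\n", m->buf_id,
			       (unsigned long long)off);
		return 0;
	}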
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index f8d58349..9f04ee01 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -641,3 +641,137 @@ struct nvgpu_mapped_buf *__nvgpu_vm_find_mapped_buf_less_than(
 
 	return mapped_buffer_from_rbtree_node(node);
 }
+
+int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
+			 struct nvgpu_mapped_buf ***mapped_buffers,
+			 int *num_buffers)
+{
+	struct nvgpu_mapped_buf *mapped_buffer;
+	struct nvgpu_mapped_buf **buffer_list;
+	struct nvgpu_rbtree_node *node = NULL;
+	int i = 0;
+
+	if (vm->userspace_managed) {
+		*mapped_buffers = NULL;
+		*num_buffers = 0;
+		return 0;
+	}
+
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+	buffer_list = nvgpu_big_zalloc(vm->mm->g, sizeof(*buffer_list) *
+				       vm->num_user_mapped_buffers);
+	if (!buffer_list) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		return -ENOMEM;
+	}
+
+	nvgpu_rbtree_enum_start(0, &node, vm->mapped_buffers);
+	while (node) {
+		mapped_buffer = mapped_buffer_from_rbtree_node(node);
+		if (mapped_buffer->user_mapped) {
+			buffer_list[i] = mapped_buffer;
+			nvgpu_ref_get(&mapped_buffer->ref);
+			i++;
+		}
+		nvgpu_rbtree_enum_next(&node, node);
+	}
+
+	BUG_ON(i != vm->num_user_mapped_buffers);
+
+	*num_buffers = vm->num_user_mapped_buffers;
+	*mapped_buffers = buffer_list;
+
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+	return 0;
+}
+
+void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref)
+{
+	struct nvgpu_mapped_buf *mapped_buffer =
+		container_of(ref, struct nvgpu_mapped_buf, ref);
+	nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
+}
+
+void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
+			  struct nvgpu_mapped_buf **mapped_buffers,
+			  int num_buffers)
+{
+	int i;
+	struct vm_gk20a_mapping_batch batch;
+
+	if (num_buffers == 0)
+		return;
+
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+	nvgpu_vm_mapping_batch_start(&batch);
+	vm->kref_put_batch = &batch;
+
+	for (i = 0; i < num_buffers; ++i)
+		nvgpu_ref_put(&mapped_buffers[i]->ref,
+			      nvgpu_vm_unmap_locked_ref);
+
+	vm->kref_put_batch = NULL;
+	nvgpu_vm_mapping_batch_finish_locked(vm, &batch);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+	nvgpu_big_free(vm->mm->g, mapped_buffers);
+}
+
+static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
+				struct vm_gk20a_mapping_batch *batch)
+{
+	struct gk20a *g = vm->mm->g;
+	struct nvgpu_mapped_buf *mapped_buffer;
+
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
+	if (!mapped_buffer) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		nvgpu_err(g, "invalid addr to unmap 0x%llx", offset);
+		return;
+	}
+
+	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
+		struct nvgpu_timeout timeout;
+
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+
+		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
+				   NVGPU_TIMER_RETRY_TIMER);
+		do {
+			if (nvgpu_atomic_read(
+				    &mapped_buffer->ref.refcount) == 1)
+				break;
+			nvgpu_udelay(5);
+		} while (!nvgpu_timeout_expired_msg(&timeout,
+					"sync-unmap failed on 0x%llx"));
+
+		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+	}
+
+	if (mapped_buffer->user_mapped == 0) {
+		nvgpu_mutex_release(&vm->update_gmmu_lock);
+		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
+		return;
+	}
+
+	mapped_buffer->user_mapped--;
+	if (mapped_buffer->user_mapped == 0)
+		vm->num_user_mapped_buffers--;
+
+	vm->kref_put_batch = batch;
+	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
+	vm->kref_put_batch = NULL;
+
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+}
+
+int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
+			  struct vm_gk20a_mapping_batch *batch)
+{
+	nvgpu_vm_unmap_user(vm, offset, batch);
+	return 0;
+}
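nvgpu_vm_get_buffers() and nvgpu_vm_put_buffers() follow a classic snapshot pattern: pin every object with a reference while holding the lock, let the caller work on the snapshot unlocked, then drop all references in one batched pass. A compact sketch of the pattern using C11 atomics rather than the driver's nvgpu_ref (all names below are illustrative):

	#include <stdatomic.h>
	#include <stdlib.h>

	struct obj {
		atomic_int refs;
		void (*release)(struct obj *o); /* called when refs hits zero */
	};

	static void obj_get(struct obj *o)
	{
		atomic_fetch_add(&o->refs, 1);
	}

	static void obj_put(struct obj *o)
	{
		/* fetch_sub returns the old value; 1 means we were last */
		if (atomic_fetch_sub(&o->refs, 1) == 1)
			o->release(o);
	}

	/* Snapshot under the list lock, mirrors nvgpu_vm_get_buffers(). */
	static struct obj **snapshot(struct obj **list, int n)
	{
		struct obj **copy = calloc(n, sizeof(*copy));

		if (!copy)
			return NULL;
		for (int i = 0; i < n; i++) {
			copy[i] = list[i];
			obj_get(copy[i]); /* pin each object for the caller */
		}
		return copy;
	}

	/* Batched release, mirrors nvgpu_vm_put_buffers(). */
	static void put_all(struct obj **copy, int n)
	{
		for (int i = 0; i < n; i++)
			obj_put(copy[i]);
		free(copy);
	}

Batching matters because each dropped reference can trigger nvgpu_vm_unmap_locked(); vm->kref_put_batch lets those unmaps share a single maintenance pass in nvgpu_vm_mapping_batch_finish_locked() instead of flushing per buffer.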
diff --git a/drivers/gpu/nvgpu/common/mm/vm_area.c b/drivers/gpu/nvgpu/common/mm/vm_area.c
index de1623bc..88758b85 100644
--- a/drivers/gpu/nvgpu/common/mm/vm_area.c
+++ b/drivers/gpu/nvgpu/common/mm/vm_area.c
@@ -208,7 +208,7 @@ int nvgpu_vm_area_free(struct vm_gk20a *vm, u64 addr)
 			&vm_area->buffer_list_head,
 			nvgpu_mapped_buf, buffer_list) {
 		nvgpu_list_del(&buffer->buffer_list);
-		nvgpu_ref_put(&buffer->ref, gk20a_vm_unmap_locked_ref);
+		nvgpu_ref_put(&buffer->ref, nvgpu_vm_unmap_locked_ref);
 	}
 
 	/* if this was a sparse mapping, free the va */
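One further detail from the mm/vm.c hunk above: for fixed-offset mappings, nvgpu_vm_unmap_user() drops the gmmu lock and polls until the caller holds the last reference, bounded by a 10-second retry timeout, before completing the unmap. The wait in isolation (C11, with usleep() standing in for nvgpu_udelay() and a retry budget standing in for nvgpu_timeout; illustrative only):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <unistd.h>

	/*
	 * Minimal model of the sync-unmap wait: spin until this caller
	 * holds the only reference, bounded by roughly 10 s of 5 us polls.
	 */
	static bool wait_for_last_ref(atomic_int *refs)
	{
		for (long tries = 0; tries < 10000000L / 5; tries++) {
			if (atomic_load(refs) == 1)
				return true; /* only our reference remains */
			usleep(5);           /* mirrors nvgpu_udelay(5) */
		}
		return false; /* mirrors the "sync-unmap failed" warning */
	}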