author	Alex Waterman <alexw@nvidia.com>	2017-10-12 19:19:55 -0400
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2017-10-24 18:16:50 -0400
commit	0c5d0c6a9ef0e33f01ce1485674bb2271e4bb580 (patch)
tree	15ce6b6d41decce809e62c996be94492263daede /drivers/gpu/nvgpu/common/mm/vm.c
parent	2a285d0607a20694476399f5719e74dbc26fcd58 (diff)
gpu: nvgpu: Begin reorganizing VM mapping/unmapping
Move vm_priv.h to <nvgpu/linux/vm.h> and rename nvgpu_vm_map() to
nvgpu_vm_map_linux(). Also remove a redundant unmap function from the
unmap path. These changes are the beginning of reworking the nvgpu Linux
mapping and unmapping code. The rest of this patch is just the changes
necessary to use the new map function name and the new path to the Linux
VM header.

Patch Series Goal
-----------------

There are two major goals for this patch series. Note that these goals
are not achieved in this patch; there will be subsequent patches.

1. Remove all last vestiges of Linux code from common/mm/vm.c
2. Implement map caching in the common/mm/vm.c code

To accomplish this, the struct nvgpu_mapped_buf used by the VM mapping
code must first become completely Linux-free. That means implementing an
abstraction to hold the Linux-specific state that mapped buffers carry
around (the SGT and the dma_buf). This is why the vm_priv.h code has
been moved: it will need to be included by the <nvgpu/vm.h> header so
that the OS-specific struct can be pulled into struct nvgpu_mapped_buf.

Next, renaming nvgpu_vm_map() to nvgpu_vm_map_linux() prepares for a new
nvgpu_vm_map() that handles map caching with nvgpu_mapped_buf. The
mapping code is fairly straightforward: nvgpu_vm_map() does the
OS-generic work; each OS then calls this function from an
nvgpu_vm_map_<OS>() (or the like) that does any OS-specific adjustments
and management.

Freeing buffers is much trickier, however. The maps are all reference
counted, since userspace does not track buffers and expects the driver
to handle this instead. Ugh! Because of the ref-counts, the free code
will require a callback into the OS-specific code; the common code
cannot free a buffer directly. This makes the path for freeing a buffer
quite convoluted.

JIRA NVGPU-30
JIRA NVGPU-71

Change-Id: I5e0975f60663a0d6cf0a6bd90e099f51e02c2395
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1578896
GVS: Gerrit_Virtual_Submit
Reviewed-by: David Martinez Nieto <dmartineznie@nvidia.com>
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
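[Editor's note] To make the abstraction described above concrete, here is a
minimal sketch of the direction the series states, not code from this patch;
the names nvgpu_mapped_buf_priv and os_priv, and the exact field layout, are
illustrative assumptions:

	/* <nvgpu/linux/vm.h>: Linux-only state a mapped buffer carries. */
	struct nvgpu_mapped_buf_priv {
		struct dma_buf *dmabuf;	/* Illustrative: the dma_buf and */
		struct sg_table *sgt;	/* SGT named in the message above. */
	};

	/*
	 * <nvgpu/vm.h>: the common struct embeds the OS-specific struct
	 * opaquely, so common/mm/vm.c never touches dma_buf or SGT types
	 * directly.
	 */
	struct nvgpu_mapped_buf {
		struct vm_gk20a *vm;
		struct nvgpu_ref ref;
		u64 addr;
		u64 size;
		struct nvgpu_mapped_buf_priv os_priv;
	};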
Diffstat (limited to 'drivers/gpu/nvgpu/common/mm/vm.c')
-rw-r--r--	drivers/gpu/nvgpu/common/mm/vm.c	56
1 file changed, 8 insertions(+), 48 deletions(-)
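[Editor's note] As an illustration of goal 2 above (map caching), the future
common nvgpu_vm_map() could consult the VM's existing mapped-buffer tree and
take a reference instead of re-mapping. A hedged sketch, reusing the lookup
and lock visible in the diff below; nvgpu_vm_find_cached_map() is a
hypothetical helper name and nvgpu_ref_get() is assumed to be nvgpu's
kref-style get:

	/* Sketch: return an existing mapping (with a new ref) if cached. */
	static struct nvgpu_mapped_buf *nvgpu_vm_find_cached_map(
		struct vm_gk20a *vm, u64 offset)
	{
		struct nvgpu_mapped_buf *mapped_buffer;

		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
		mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
		if (mapped_buffer)
			nvgpu_ref_get(&mapped_buffer->ref);
		nvgpu_mutex_release(&vm->update_gmmu_lock);

		return mapped_buffer;
	}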
diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c
index 9f04ee01..c6c99b31 100644
--- a/drivers/gpu/nvgpu/common/mm/vm.c
+++ b/drivers/gpu/nvgpu/common/mm/vm.c
@@ -687,13 +687,6 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm,
 	return 0;
 }
 
-void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref)
-{
-	struct nvgpu_mapped_buf *mapped_buffer =
-		container_of(ref, struct nvgpu_mapped_buf, ref);
-	nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
-}
-
 void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 			  struct nvgpu_mapped_buf **mapped_buffers,
 			  int num_buffers)
@@ -719,14 +712,19 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm,
 	nvgpu_big_free(vm->mm->g, mapped_buffers);
 }
 
-static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
-				struct vm_gk20a_mapping_batch *batch)
+void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref)
+{
+	struct nvgpu_mapped_buf *mapped_buffer =
+		container_of(ref, struct nvgpu_mapped_buf, ref);
+	nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch);
+}
+
+void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset)
 {
 	struct gk20a *g = vm->mm->g;
 	struct nvgpu_mapped_buf *mapped_buffer;
 
 	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-
 	mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset);
 	if (!mapped_buffer) {
 		nvgpu_mutex_release(&vm->update_gmmu_lock);
@@ -734,44 +732,6 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset,
 		return;
 	}
 
-	if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) {
-		struct nvgpu_timeout timeout;
-
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-
-		nvgpu_timeout_init(vm->mm->g, &timeout, 10000,
-				   NVGPU_TIMER_RETRY_TIMER);
-		do {
-			if (nvgpu_atomic_read(
-				    &mapped_buffer->ref.refcount) == 1)
-				break;
-			nvgpu_udelay(5);
-		} while (!nvgpu_timeout_expired_msg(&timeout,
-				"sync-unmap failed on 0x%llx"));
-
-		nvgpu_mutex_acquire(&vm->update_gmmu_lock);
-	}
-
-	if (mapped_buffer->user_mapped == 0) {
-		nvgpu_mutex_release(&vm->update_gmmu_lock);
-		nvgpu_err(g, "addr already unmapped from user 0x%llx", offset);
-		return;
-	}
-
-	mapped_buffer->user_mapped--;
-	if (mapped_buffer->user_mapped == 0)
-		vm->num_user_mapped_buffers--;
-
-	vm->kref_put_batch = batch;
 	nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref);
-	vm->kref_put_batch = NULL;
-
 	nvgpu_mutex_release(&vm->update_gmmu_lock);
 }
-
-int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset,
-			  struct vm_gk20a_mapping_batch *batch)
-{
-	nvgpu_vm_unmap_user(vm, offset, batch);
-	return 0;
-}
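[Editor's note] Finally, the "convoluted" free path the commit message warns
about can be pictured as the last-reference callback below. This is a sketch
of the planned common/OS split, not code from this series;
nvgpu_vm_unmap_ref_cb() and nvgpu_vm_unmap_system() are hypothetical names
for the release hook and the OS-specific callback:

	static void nvgpu_vm_unmap_ref_cb(struct nvgpu_ref *ref)
	{
		struct nvgpu_mapped_buf *mapped_buffer =
			container_of(ref, struct nvgpu_mapped_buf, ref);

		/*
		 * OS-generic teardown: clear the GMMU mapping and drop the
		 * buffer from the VM's tracking structures.
		 */
		nvgpu_vm_unmap_locked(mapped_buffer,
				      mapped_buffer->vm->kref_put_batch);

		/*
		 * OS-specific teardown: common code cannot release the
		 * dma_buf/SGT itself, so it calls back into the OS layer.
		 */
		nvgpu_vm_unmap_system(mapped_buffer); /* hypothetical hook */
	}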