From 0c5d0c6a9ef0e33f01ce1485674bb2271e4bb580 Mon Sep 17 00:00:00 2001
From: Alex Waterman
Date: Thu, 12 Oct 2017 16:19:55 -0700
Subject: gpu: nvgpu: Begin reorganizing VM mapping/unmapping

Move vm_priv.h to <nvgpu/linux/vm.h> and rename nvgpu_vm_map() to
nvgpu_vm_map_linux(). Also remove a redundant unmap function from the
unmap path.

These changes are the beginning of reworking the nvgpu Linux mapping
and unmapping code. The rest of this patch is just the changes needed
to use the new map function name and the new path to the Linux VM
header.

Patch Series Goal
-----------------

There are two major goals for this patch series. Note that these goals
are not achieved in this patch; they will be addressed by subsequent
patches.

  1. Remove all last vestiges of Linux code from common/mm/vm.c
  2. Implement map caching in the common/mm/vm.c code

To accomplish this, the struct nvgpu_mapped_buf data structure first
needs to be made completely Linux free. That means implementing an
abstraction to hold the Linux state that mapped buffers carry around
(SGT, dma_buf). This is why the vm_priv.h code has been moved: it will
need to be included by the <nvgpu/vm.h> header so that the OS specific
struct can be pulled into struct nvgpu_mapped_buf.

Next, renaming nvgpu_vm_map() to nvgpu_vm_map_linux() is in
preparation for adding a new nvgpu_vm_map() that handles the map
caching with nvgpu_mapped_buf. The mapping code is fairly
straightforward: nvgpu_vm_map() does the OS generic work; each OS then
calls this function from an nvgpu_vm_map_<os>() or the like that does
any OS specific adjustments/management.

Freeing buffers is much trickier, however. The maps are all reference
counted since userspace does not track buffers and expects us to
handle this instead. Ugh! Since there are ref-counts, the free code
will require a callback into the OS specific code, because the OS
specific code cannot free a buffer directly. This makes the path for
freeing a buffer quite convoluted.

JIRA NVGPU-30
JIRA NVGPU-71

Change-Id: I5e0975f60663a0d6cf0a6bd90e099f51e02c2395
Signed-off-by: Alex Waterman
Reviewed-on: https://git-master.nvidia.com/r/1578896
GVS: Gerrit_Virtual_Submit
Reviewed-by: David Martinez Nieto
Reviewed-by: Terje Bergstrom
---
 drivers/gpu/nvgpu/common/linux/cde.c       |  12 ++--
 drivers/gpu/nvgpu/common/linux/dmabuf.c    |   2 +-
 drivers/gpu/nvgpu/common/linux/ioctl_as.c  |   6 +-
 drivers/gpu/nvgpu/common/linux/ioctl_dbg.c |   3 +-
 drivers/gpu/nvgpu/common/linux/vidmem.c    |   3 +-
 drivers/gpu/nvgpu/common/linux/vm.c        |  76 +++++++++++++++------
 drivers/gpu/nvgpu/common/linux/vm_priv.h   | 102 ----------------------------
 drivers/gpu/nvgpu/common/mm/vm.c           |  56 +++------------
 drivers/gpu/nvgpu/gk20a/channel_gk20a.c    |  14 ++--
 drivers/gpu/nvgpu/include/nvgpu/linux/vm.h | 105 +++++++++++++++++++++++++++++
 drivers/gpu/nvgpu/include/nvgpu/vm.h       |   6 +-
 drivers/gpu/nvgpu/vgpu/mm_vgpu.c           |   3 +-
 12 files changed, 188 insertions(+), 200 deletions(-)
 delete mode 100644 drivers/gpu/nvgpu/common/linux/vm_priv.h
 create mode 100644 drivers/gpu/nvgpu/include/nvgpu/linux/vm.h

diff --git a/drivers/gpu/nvgpu/common/linux/cde.c b/drivers/gpu/nvgpu/common/linux/cde.c
index 577d86e8..30edc1d5 100644
--- a/drivers/gpu/nvgpu/common/linux/cde.c
+++ b/drivers/gpu/nvgpu/common/linux/cde.c
@@ -31,6 +31,8 @@
 #include 
 #include 
 
+#include 
+
 #include "gk20a/gk20a.h"
 #include "gk20a/channel_gk20a.h"
 #include "gk20a/mm_gk20a.h"
@@ -44,12 +46,6 @@
 #include 
 #include 
 
-/*
- * Currently this code uses nvgpu_vm_map() since it takes dmabuf FDs from the
- * CDE ioctls.
That has to change - instead this needs to take an nvgpu_mem. - */ -#include "common/linux/vm_priv.h" - static int gk20a_cde_load(struct gk20a_cde_ctx *cde_ctx); static struct gk20a_cde_ctx *gk20a_cde_allocate_context(struct nvgpu_os_linux *l); @@ -1052,8 +1048,8 @@ __releases(&l->cde_app->mutex) /* map the destination buffer */ - get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map */ - map_vaddr = nvgpu_vm_map(cde_ctx->vm, compbits_scatter_buf, 0, + get_dma_buf(compbits_scatter_buf); /* a ref for nvgpu_vm_map_linux */ + map_vaddr = nvgpu_vm_map_linux(cde_ctx->vm, compbits_scatter_buf, 0, NVGPU_AS_MAP_BUFFER_FLAGS_CACHEABLE | NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL, NV_KIND_INVALID, diff --git a/drivers/gpu/nvgpu/common/linux/dmabuf.c b/drivers/gpu/nvgpu/common/linux/dmabuf.c index 0b07b255..2415b7c2 100644 --- a/drivers/gpu/nvgpu/common/linux/dmabuf.c +++ b/drivers/gpu/nvgpu/common/linux/dmabuf.c @@ -21,13 +21,13 @@ #include #include +#include #include #include "gk20a/gk20a.h" #include "gk20a/platform_gk20a.h" #include "dmabuf.h" -#include "vm_priv.h" #include "os_linux.h" static void gk20a_mm_delete_priv(void *_priv) diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_as.c b/drivers/gpu/nvgpu/common/linux/ioctl_as.c index cfc4e7ef..08064370 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_as.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_as.c @@ -17,19 +17,19 @@ #include #include -#include - #include #include #include #include +#include + +#include #include "gk20a/gk20a.h" #include "gk20a/platform_gk20a.h" #include "ioctl_as.h" -#include "vm_priv.h" #include "os_linux.h" static int gk20a_as_ioctl_bind_channel( diff --git a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c index c8831a97..7e62bb5c 100644 --- a/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c +++ b/drivers/gpu/nvgpu/common/linux/ioctl_dbg.c @@ -28,7 +28,9 @@ #include #include #include + #include +#include #include "gk20a/gk20a.h" #include "gk20a/platform_gk20a.h" @@ -38,7 +40,6 @@ #include "os_linux.h" #include "ioctl_dbg.h" -#include "vm_priv.h" /* silly allocator - just increment id */ static nvgpu_atomic_t unique_id = NVGPU_ATOMIC_INIT(0); diff --git a/drivers/gpu/nvgpu/common/linux/vidmem.c b/drivers/gpu/nvgpu/common/linux/vidmem.c index 03976da3..1e65b54d 100644 --- a/drivers/gpu/nvgpu/common/linux/vidmem.c +++ b/drivers/gpu/nvgpu/common/linux/vidmem.c @@ -24,14 +24,13 @@ #include #include +#include #include #include #include "gk20a/gk20a.h" #include "gk20a/mm_gk20a.h" -#include "vm_priv.h" - bool nvgpu_addr_is_vidmem_page_alloc(u64 addr) { return !!(addr & 1ULL); diff --git a/drivers/gpu/nvgpu/common/linux/vm.c b/drivers/gpu/nvgpu/common/linux/vm.c index 638d3e51..984c2015 100644 --- a/drivers/gpu/nvgpu/common/linux/vm.c +++ b/drivers/gpu/nvgpu/common/linux/vm.c @@ -25,6 +25,7 @@ #include #include +#include #include #include @@ -33,7 +34,6 @@ #include "gk20a/kind_gk20a.h" #include "gk20a/platform_gk20a.h" -#include "vm_priv.h" #include "os_linux.h" #include "dmabuf.h" @@ -323,17 +323,17 @@ static int setup_bfr_kind_fields(struct buffer_attrs *bfr, s16 compr_kind, return 0; } -u64 nvgpu_vm_map(struct vm_gk20a *vm, - struct dma_buf *dmabuf, - u64 offset_align, - u32 flags, - s16 compr_kind, - s16 incompr_kind, - bool user_mapped, - int rw_flag, - u64 buffer_offset, - u64 mapping_size, - struct vm_gk20a_mapping_batch *batch) +u64 nvgpu_vm_map_linux(struct vm_gk20a *vm, + struct dma_buf *dmabuf, + u64 offset_align, + u32 flags, + s16 compr_kind, + s16 incompr_kind, + bool 
user_mapped, + int rw_flag, + u64 buffer_offset, + u64 mapping_size, + struct vm_gk20a_mapping_batch *batch) { struct gk20a *g = gk20a_from_vm(vm); struct device *dev = dev_from_gk20a(g); @@ -625,12 +625,12 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm, return err; } - ret_va = nvgpu_vm_map(vm, dmabuf, *offset_align, - flags, compr_kind, incompr_kind, true, - gk20a_mem_flag_none, - buffer_offset, - mapping_size, - batch); + ret_va = nvgpu_vm_map_linux(vm, dmabuf, *offset_align, + flags, compr_kind, incompr_kind, true, + gk20a_mem_flag_none, + buffer_offset, + mapping_size, + batch); *offset_align = ret_va; if (!ret_va) { @@ -641,21 +641,55 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm, return err; } -void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset) +int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset, + struct vm_gk20a_mapping_batch *batch) { struct gk20a *g = vm->mm->g; struct nvgpu_mapped_buf *mapped_buffer; nvgpu_mutex_acquire(&vm->update_gmmu_lock); + mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset); if (!mapped_buffer) { nvgpu_mutex_release(&vm->update_gmmu_lock); nvgpu_err(g, "invalid addr to unmap 0x%llx", offset); - return; + return 0; + } + + if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { + struct nvgpu_timeout timeout; + + nvgpu_mutex_release(&vm->update_gmmu_lock); + + nvgpu_timeout_init(vm->mm->g, &timeout, 10000, + NVGPU_TIMER_RETRY_TIMER); + do { + if (nvgpu_atomic_read( + &mapped_buffer->ref.refcount) == 1) + break; + nvgpu_udelay(5); + } while (!nvgpu_timeout_expired_msg(&timeout, + "sync-unmap failed on 0x%llx")); + + nvgpu_mutex_acquire(&vm->update_gmmu_lock); + } + + if (mapped_buffer->user_mapped == 0) { + nvgpu_mutex_release(&vm->update_gmmu_lock); + nvgpu_err(g, "addr already unmapped from user 0x%llx", offset); + return 0; } + mapped_buffer->user_mapped--; + if (mapped_buffer->user_mapped == 0) + vm->num_user_mapped_buffers--; + + vm->kref_put_batch = batch; nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref); + vm->kref_put_batch = NULL; + nvgpu_mutex_release(&vm->update_gmmu_lock); + return 0; } /* NOTE! mapped_buffers lock must be held */ @@ -691,6 +725,4 @@ void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer, dma_buf_put(mapped_buffer->dmabuf); nvgpu_kfree(g, mapped_buffer); - - return; } diff --git a/drivers/gpu/nvgpu/common/linux/vm_priv.h b/drivers/gpu/nvgpu/common/linux/vm_priv.h deleted file mode 100644 index be7efa8b..00000000 --- a/drivers/gpu/nvgpu/common/linux/vm_priv.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#ifndef __COMMON_LINUX_VM_PRIV_H__ -#define __COMMON_LINUX_VM_PRIV_H__ - -#include - -struct sg_table; -struct dma_buf; - -struct vm_gk20a; -struct vm_gk20a_mapping_batch; - -struct buffer_attrs { - struct sg_table *sgt; - u64 size; - u64 align; - u32 ctag_offset; - u32 ctag_lines; - u32 ctag_allocated_lines; - int pgsz_idx; - u8 kind_v; - bool use_kind_v; - u8 uc_kind_v; - bool use_uc_kind_v; - bool ctag_user_mappable; -}; - -u64 nvgpu_vm_map(struct vm_gk20a *vm, - struct dma_buf *dmabuf, - u64 offset_align, - u32 flags, - - /* - * compressible kind if - * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is - * specified, otherwise just the kind - */ - s16 compr_kind, - - /* - * incompressible kind if - * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is - * specified, otherwise ignored - */ - s16 incompr_kind, - - bool user_mapped, - int rw_flag, - u64 buffer_offset, - u64 mapping_size, - struct vm_gk20a_mapping_batch *mapping_batch); - -/* - * Notes: - * - Batch may be NULL if map op is not part of a batch. - * - If NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is set, - * compr_kind and incompr_kind work as explained in nvgpu.h. - * - If NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is NOT set, - * compr_kind holds the kind and kernel will figure out whether - * it is a compressible or incompressible kind. If compressible, kernel will - * also figure out the incompressible counterpart or return an error. - */ -int nvgpu_vm_map_buffer(struct vm_gk20a *vm, - int dmabuf_fd, - u64 *offset_align, - u32 flags, /* NVGPU_AS_MAP_BUFFER_FLAGS_ */ - s16 compr_kind, - s16 incompr_kind, - u64 buffer_offset, - u64 mapping_size, - struct vm_gk20a_mapping_batch *batch); - -void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset); - -/* find buffer corresponding to va */ -int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va, - struct dma_buf **dmabuf, - u64 *offset); - -enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g, - struct dma_buf *dmabuf); -int validate_fixed_buffer(struct vm_gk20a *vm, - struct buffer_attrs *bfr, - u64 map_offset, u64 map_size, - struct nvgpu_vm_area **pva_node); - -#endif diff --git a/drivers/gpu/nvgpu/common/mm/vm.c b/drivers/gpu/nvgpu/common/mm/vm.c index 9f04ee01..c6c99b31 100644 --- a/drivers/gpu/nvgpu/common/mm/vm.c +++ b/drivers/gpu/nvgpu/common/mm/vm.c @@ -687,13 +687,6 @@ int nvgpu_vm_get_buffers(struct vm_gk20a *vm, return 0; } -void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref) -{ - struct nvgpu_mapped_buf *mapped_buffer = - container_of(ref, struct nvgpu_mapped_buf, ref); - nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch); -} - void nvgpu_vm_put_buffers(struct vm_gk20a *vm, struct nvgpu_mapped_buf **mapped_buffers, int num_buffers) @@ -719,14 +712,19 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm, nvgpu_big_free(vm->mm->g, mapped_buffers); } -static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 offset, - struct vm_gk20a_mapping_batch *batch) +void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref) +{ + struct nvgpu_mapped_buf *mapped_buffer = + container_of(ref, struct nvgpu_mapped_buf, ref); + nvgpu_vm_unmap_locked(mapped_buffer, mapped_buffer->vm->kref_put_batch); +} + +void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset) { struct gk20a *g = vm->mm->g; struct nvgpu_mapped_buf *mapped_buffer; nvgpu_mutex_acquire(&vm->update_gmmu_lock); - mapped_buffer = __nvgpu_vm_find_mapped_buf(vm, offset); if (!mapped_buffer) { nvgpu_mutex_release(&vm->update_gmmu_lock); @@ -734,44 +732,6 @@ static void nvgpu_vm_unmap_user(struct vm_gk20a *vm, u64 
offset, return; } - if (mapped_buffer->flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) { - struct nvgpu_timeout timeout; - - nvgpu_mutex_release(&vm->update_gmmu_lock); - - nvgpu_timeout_init(vm->mm->g, &timeout, 10000, - NVGPU_TIMER_RETRY_TIMER); - do { - if (nvgpu_atomic_read( - &mapped_buffer->ref.refcount) == 1) - break; - nvgpu_udelay(5); - } while (!nvgpu_timeout_expired_msg(&timeout, - "sync-unmap failed on 0x%llx")); - - nvgpu_mutex_acquire(&vm->update_gmmu_lock); - } - - if (mapped_buffer->user_mapped == 0) { - nvgpu_mutex_release(&vm->update_gmmu_lock); - nvgpu_err(g, "addr already unmapped from user 0x%llx", offset); - return; - } - - mapped_buffer->user_mapped--; - if (mapped_buffer->user_mapped == 0) - vm->num_user_mapped_buffers--; - - vm->kref_put_batch = batch; nvgpu_ref_put(&mapped_buffer->ref, nvgpu_vm_unmap_locked_ref); - vm->kref_put_batch = NULL; - nvgpu_mutex_release(&vm->update_gmmu_lock); } - -int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset, - struct vm_gk20a_mapping_batch *batch) -{ - nvgpu_vm_unmap_user(vm, offset, batch); - return 0; -} diff --git a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c index e3fc61c0..590506d6 100644 --- a/drivers/gpu/nvgpu/gk20a/channel_gk20a.c +++ b/drivers/gpu/nvgpu/gk20a/channel_gk20a.c @@ -43,6 +43,13 @@ #include #include +/* + * This is required for nvgpu_vm_find_buf() which is used in the tracing + * code. Once we can get and access userspace buffers without requiring + * direct dma_buf usage this can be removed. + */ +#include + #include "gk20a.h" #include "ctxsw_trace_gk20a.h" #include "dbg_gpu_gk20a.h" @@ -57,13 +64,6 @@ */ #include -/* - * This is required for nvgpu_vm_find_buffer() which is used in the tracing - * code. Once we can get and access userspace buffers without requiring - * direct dma_buf usage this can be removed. - */ -#include "common/linux/vm_priv.h" - /* * Although channels do have pointers back to the gk20a struct that they were * created under in cases where the driver is killed that pointer can be bad. diff --git a/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h b/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h new file mode 100644 index 00000000..91f0cf09 --- /dev/null +++ b/drivers/gpu/nvgpu/include/nvgpu/linux/vm.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __COMMON_LINUX_VM_PRIV_H__ +#define __COMMON_LINUX_VM_PRIV_H__ + +#include + +struct sg_table; +struct dma_buf; + +struct vm_gk20a; +struct vm_gk20a_mapping_batch; +struct nvgpu_vm_area; + +struct buffer_attrs { + struct sg_table *sgt; + u64 size; + u64 align; + u32 ctag_offset; + u32 ctag_lines; + u32 ctag_allocated_lines; + int pgsz_idx; + u8 kind_v; + bool use_kind_v; + u8 uc_kind_v; + bool use_uc_kind_v; + bool ctag_user_mappable; +}; + +u64 nvgpu_vm_map_linux(struct vm_gk20a *vm, + struct dma_buf *dmabuf, + u64 offset_align, + u32 flags, + + /* + * compressible kind if + * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is + * specified, otherwise just the kind + */ + s16 compr_kind, + + /* + * incompressible kind if + * NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is + * specified, otherwise ignored + */ + s16 incompr_kind, + + bool user_mapped, + int rw_flag, + u64 buffer_offset, + u64 mapping_size, + struct vm_gk20a_mapping_batch *mapping_batch); + +/* + * Notes: + * - Batch may be NULL if map op is not part of a batch. + * - If NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is set, + * compr_kind and incompr_kind work as explained in nvgpu.h. + * - If NVGPU_AS_MAP_BUFFER_FLAGS_DIRECT_KIND_CTRL is NOT set, + * compr_kind holds the kind and kernel will figure out whether + * it is a compressible or incompressible kind. If compressible, kernel will + * also figure out the incompressible counterpart or return an error. + */ +int nvgpu_vm_map_buffer(struct vm_gk20a *vm, + int dmabuf_fd, + u64 *offset_align, + u32 flags, /* NVGPU_AS_MAP_BUFFER_FLAGS_ */ + s16 compr_kind, + s16 incompr_kind, + u64 buffer_offset, + u64 mapping_size, + struct vm_gk20a_mapping_batch *batch); + +/* Note: batch may be NULL if unmap op is not part of a batch */ +int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset, + struct vm_gk20a_mapping_batch *batch); + +/* find buffer corresponding to va */ +int nvgpu_vm_find_buf(struct vm_gk20a *vm, u64 gpu_va, + struct dma_buf **dmabuf, + u64 *offset); + +enum nvgpu_aperture gk20a_dmabuf_aperture(struct gk20a *g, + struct dma_buf *dmabuf); +int validate_fixed_buffer(struct vm_gk20a *vm, + struct buffer_attrs *bfr, + u64 map_offset, u64 map_size, + struct nvgpu_vm_area **pva_node); + +#endif diff --git a/drivers/gpu/nvgpu/include/nvgpu/vm.h b/drivers/gpu/nvgpu/include/nvgpu/vm.h index 8c56461c..e529512b 100644 --- a/drivers/gpu/nvgpu/include/nvgpu/vm.h +++ b/drivers/gpu/nvgpu/include/nvgpu/vm.h @@ -207,14 +207,12 @@ void nvgpu_vm_put_buffers(struct vm_gk20a *vm, struct nvgpu_mapped_buf **mapped_buffers, int num_buffers); -/* Note: batch may be NULL if unmap op is not part of a batch */ -int nvgpu_vm_unmap_buffer(struct vm_gk20a *vm, u64 offset, - struct vm_gk20a_mapping_batch *batch); - void nvgpu_vm_unmap_locked(struct nvgpu_mapped_buf *mapped_buffer, struct vm_gk20a_mapping_batch *batch); void nvgpu_vm_unmap_locked_ref(struct nvgpu_ref *ref); +void nvgpu_vm_unmap(struct vm_gk20a *vm, u64 offset); + /* * These all require the VM update lock to be held. */ diff --git a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c index 8dcca0a1..c4256afb 100644 --- a/drivers/gpu/nvgpu/vgpu/mm_vgpu.c +++ b/drivers/gpu/nvgpu/vgpu/mm_vgpu.c @@ -32,6 +32,7 @@ #include +#include #include #include "vgpu/vgpu.h" @@ -39,8 +40,6 @@ #include "gk20a/mm_gk20a.h" #include "gm20b/mm_gm20b.h" -#include "common/linux/vm_priv.h" - static int vgpu_init_mm_setup_sw(struct gk20a *g) { struct mm_gk20a *mm = &g->mm; -- cgit v1.2.2
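
For reference, below is a standalone toy model (plain C, not nvgpu code;
every name in it is made up for illustration) of the scheme the commit
message describes: mappings are reference counted in common code, and the
final free calls back into OS-specific code because the common code cannot
release the OS-side buffer state (dma_buf/SGT) itself. It is a sketch under
those assumptions only; the real nvgpu API will differ.

#include <stdio.h>
#include <stdlib.h>

struct mapped_buf {
	int refcount;
	void *os_priv;                           /* stand-in for dma_buf/SGT state */
	void (*os_release)(struct mapped_buf *); /* OS-specific free callback      */
};

/* "Linux" side: only it knows how to release its private buffer state. */
static void os_release_linux(struct mapped_buf *buf)
{
	printf("os: releasing OS-private state %p\n", buf->os_priv);
	free(buf->os_priv);
}

/*
 * Common side: create a mapping. A map-caching version would first look up
 * an existing mapped_buf for the same buffer and just take a reference.
 */
static struct mapped_buf *vm_map(void *os_priv,
				 void (*os_release)(struct mapped_buf *))
{
	struct mapped_buf *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return NULL;
	buf->refcount = 1;
	buf->os_priv = os_priv;
	buf->os_release = os_release;
	return buf;
}

/*
 * Common side: drop a reference. Only the final put tears the mapping down,
 * and the OS callback runs before the common free.
 */
static void vm_unmap(struct mapped_buf *buf)
{
	if (--buf->refcount > 0)
		return;
	buf->os_release(buf);	/* OS-specific teardown */
	free(buf);		/* common teardown      */
}

int main(void)
{
	struct mapped_buf *buf = vm_map(malloc(16), os_release_linux);

	if (!buf)
		return 1;
	buf->refcount++;	/* a second user maps the same buffer */
	vm_unmap(buf);		/* ref still held: nothing freed yet  */
	vm_unmap(buf);		/* last put: OS callback, then free   */
	return 0;
}

The point the toy makes is the one the commit message argues: nothing is
freed while references remain, and the OS callback runs before the common
free so the OS-private state is never dropped by code that does not own it.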