From dbb014e34f89298391d65ca2177baac43b60455e Mon Sep 17 00:00:00 2001 From: Preetham Chandru Ramchandra Date: Tue, 22 Jan 2019 15:11:26 +0530 Subject: gpu: nvgpu: move nv-p2p.h to include/linux Move nv-p2p.h to include/linux so that it is available to external kernel modules to be used. Bug 200438879 Change-Id: I40707fe9f798b3ccf077dbdc942f8d6fc9019458 Signed-off-by: Preetham Chandru R Reviewed-on: https://git-master.nvidia.com/r/1986646 (cherry picked from commit cfe4a2e5e87560519aedb92e2d12e39e09473a54) Reviewed-on: https://git-master.nvidia.com/r/2000830 GVS: Gerrit_Virtual_Submit Reviewed-by: Bibek Basu Reviewed-by: mobile promotions Tested-by: mobile promotions --- include/linux/nv-p2p.h | 196 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 196 insertions(+) create mode 100644 include/linux/nv-p2p.h (limited to 'include/linux') diff --git a/include/linux/nv-p2p.h b/include/linux/nv-p2p.h new file mode 100644 index 00000000..c1dee7cf --- /dev/null +++ b/include/linux/nv-p2p.h @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NVIDIA_P2P_H__ +#define __NVIDIA_P2P_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define NVIDIA_P2P_UNINITIALIZED 0x0 +#define NVIDIA_P2P_PINNED 0x1 +#define NVIDIA_P2P_MAPPED 0x2 + +enum nvidia_p2p_page_size_type { + NVIDIA_P2P_PAGE_SIZE_4KB = 0, + NVIDIA_P2P_PAGE_SIZE_64KB, + NVIDIA_P2P_PAGE_SIZE_128KB, + NVIDIA_P2P_PAGE_SIZE_COUNT +}; + +struct nvidia_p2p_page_table { + u32 page_size; + u64 size; + u32 entries; + struct page **pages; + + u64 vaddr; + u32 mapped; + + struct mm_struct *mm; + struct mmu_notifier mn; + struct nvgpu_mutex lock; + void (*free_callback)(void *data); + void *data; +}; + +struct nvidia_p2p_dma_mapping { + dma_addr_t *hw_address; + u32 *hw_len; + u32 entries; + + struct sg_table *sgt; + struct device *dev; + struct nvidia_p2p_page_table *page_table; + enum dma_data_direction direction; +}; + +/* + * @brief + * Make the pages underlying a range of GPU virtual memory + * accessible to a third-party device. + * + * @param[in] vaddr + * A GPU Virtual Address + * @param[in] size + * The size of the requested mapping. + * Size must be a multiple of Page size. + * @param[out] **page_table + * A pointer to struct nvidia_p2p_page_table + * @param[in] free_callback + * A non-NULL pointer to the function to be invoked when the pages + * underlying the virtual address range are freed + * implicitly. Must be non NULL. + * @param[in] data + * A non-NULL opaque pointer to private data to be passed to the + * callback function. + * + * @return + * 0 upon successful completion. 
+ * Negative number if any error + */ +int nvidia_p2p_get_pages(u64 vaddr, u64 size, + struct nvidia_p2p_page_table **page_table, + void (*free_callback)(void *data), void *data); +/* + * @brief + * Release the pages previously made accessible to + * a third-party device. + * + * @param[in] *page_table + * A pointer to struct nvidia_p2p_page_table + * + * @return + * 0 upon successful completion. + * -ENOMEM if the driver failed to allocate memory or if + * insufficient resources were available to complete the operation. + * Negative number if any other error + */ +int nvidia_p2p_put_pages(struct nvidia_p2p_page_table *page_table); + +/* + * @brief + * Release the pages previously made accessible to + * a third-party device. This is called during the + * execution of the free_callback(). + * + * @param[in] *page_table + * A pointer to struct nvidia_p2p_page_table + * + * @return + * 0 upon successful completion. + * -ENOMEM if the driver failed to allocate memory or if + * insufficient resources were available to complete the operation. + * Negative number if any other error + */ +int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table); + +/* + * @brief + * Map the pages retrieved using nvidia_p2p_get_pages and + * pass the dma address to a third-party device. + * + * @param[in] *dev + * The peer device that needs to DMA to/from the + * mapping. + * @param[in] *page_table + * A pointer to struct nvidia_p2p_page_table + * @param[out] **map + * A pointer to struct nvidia_p2p_dma_mapping. + * The DMA mapping containing the DMA addresses to use. + * @param[in] direction + * DMA direction + * + * @return + * 0 upon successful completion. 
+ * Negative number if any other error + */ +int nvidia_p2p_map_pages(struct device *dev, + struct nvidia_p2p_page_table *page_table, + struct nvidia_p2p_dma_mapping **map, + enum dma_data_direction direction); +/* + * @brief + * Unmap the pages previously mapped using nvidia_p2p_map_pages + * + * @param[in] *map + * A pointer to struct nvidia_p2p_dma_mapping. + * The DMA mapping containing the DMA addresses to use. + * + * @return + * 0 upon successful completion. + * Negative number if any other error + */ +int nvidia_p2p_unmap_pages(struct nvidia_p2p_dma_mapping *map); + +/* + * @brief + * Unmap the pages previously mapped using nvidia_p2p_map_pages. + * This is called during the execution of the free_callback(). + * + * @param[in] *dma_mapping + * A pointer to struct nvidia_p2p_dma_mapping. + * The DMA mapping containing the DMA addresses to use. + * + * @return + * 0 upon successful completion. + * Negative number if any other error + */ +int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping); + +#endif -- cgit v1.2.2