author	Preetham Chandru Ramchandra <pchandru@nvidia.com>	2019-01-22 04:41:26 -0500
committer	mobile promotions <svcmobile_promotions@nvidia.com>	2019-01-22 22:14:53 -0500
commit	dbb014e34f89298391d65ca2177baac43b60455e (patch)
tree	6ef0df3bef9ccf26ad9c7f34ba7cf04c1b2edb94 /include
parent	89d5f40116d0d84c2f6d8427560dd24f64f2dcaa (diff)
gpu: nvgpu: move nv-p2p.h to include/linux
Move nv-p2p.h to include/linux so that it is available for use by
external kernel modules.

Bug 200438879

Change-Id: I40707fe9f798b3ccf077dbdc942f8d6fc9019458
Signed-off-by: Preetham Chandru R <pchandru@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1986646
(cherry picked from commit cfe4a2e5e87560519aedb92e2d12e39e09473a54)
Reviewed-on: https://git-master.nvidia.com/r/2000830
GVS: Gerrit_Virtual_Submit
Reviewed-by: Bibek Basu <bbasu@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'include')
-rw-r--r--	include/linux/nv-p2p.h	196
1 file changed, 196 insertions, 0 deletions
diff --git a/include/linux/nv-p2p.h b/include/linux/nv-p2p.h
new file mode 100644
index 00000000..c1dee7cf
--- /dev/null
+++ b/include/linux/nv-p2p.h
@@ -0,0 +1,196 @@
/*
 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef __NVIDIA_P2P_H__
#define __NVIDIA_P2P_H__

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <nvgpu/linux/lock.h>

#define NVIDIA_P2P_UNINITIALIZED 0x0
#define NVIDIA_P2P_PINNED 0x1
#define NVIDIA_P2P_MAPPED 0x2

enum nvidia_p2p_page_size_type {
	NVIDIA_P2P_PAGE_SIZE_4KB = 0,
	NVIDIA_P2P_PAGE_SIZE_64KB,
	NVIDIA_P2P_PAGE_SIZE_128KB,
	NVIDIA_P2P_PAGE_SIZE_COUNT
};

struct nvidia_p2p_page_table {
	u32 page_size;
	u64 size;
	u32 entries;
	struct page **pages;

	u64 vaddr;
	u32 mapped;

	struct mm_struct *mm;
	struct mmu_notifier mn;
	struct nvgpu_mutex lock;
	void (*free_callback)(void *data);
	void *data;
};

struct nvidia_p2p_dma_mapping {
	dma_addr_t *hw_address;
	u32 *hw_len;
	u32 entries;

	struct sg_table *sgt;
	struct device *dev;
	struct nvidia_p2p_page_table *page_table;
	enum dma_data_direction direction;
};

/*
 * @brief
 *	Make the pages underlying a range of GPU virtual memory
 *	accessible to a third-party device.
 *
 * @param[in]	vaddr
 *	A GPU virtual address.
 * @param[in]	size
 *	The size of the requested mapping.
 *	The size must be a multiple of the page size.
 * @param[out]	**page_table
 *	A pointer to struct nvidia_p2p_page_table.
 * @param[in]	free_callback
 *	A non-NULL pointer to the function to be invoked when the pages
 *	underlying the virtual address range are freed implicitly.
 * @param[in]	data
 *	A non-NULL opaque pointer to private data to be passed to the
 *	callback function.
 *
 * @return
 *	0 upon successful completion.
 *	Negative number if any error occurs.
 */
int nvidia_p2p_get_pages(u64 vaddr, u64 size,
		struct nvidia_p2p_page_table **page_table,
		void (*free_callback)(void *data), void *data);

/*
 * @brief
 *	Release the pages previously made accessible to
 *	a third-party device.
 *
 * @param[in]	*page_table
 *	A pointer to struct nvidia_p2p_page_table.
 *
 * @return
 *	0 upon successful completion.
 *	-ENOMEM if the driver failed to allocate memory or if
 *	insufficient resources were available to complete the operation.
 *	Negative number if any other error occurs.
 */
int nvidia_p2p_put_pages(struct nvidia_p2p_page_table *page_table);

/*
 * @brief
 *	Release the pages previously made accessible to
 *	a third-party device. This is called during the
 *	execution of the free_callback().
 *
 * @param[in]	*page_table
 *	A pointer to struct nvidia_p2p_page_table.
 *
 * @return
 *	0 upon successful completion.
 *	-ENOMEM if the driver failed to allocate memory or if
 *	insufficient resources were available to complete the operation.
 *	Negative number if any other error occurs.
 */
int nvidia_p2p_free_page_table(struct nvidia_p2p_page_table *page_table);

/*
 * @brief
 *	Map the pages retrieved using nvidia_p2p_get_pages and
 *	pass the DMA addresses to a third-party device.
 *
 * @param[in]	*dev
 *	The peer device that needs to DMA to/from the
 *	mapping.
 * @param[in]	*page_table
 *	A pointer to struct nvidia_p2p_page_table.
 * @param[out]	**map
 *	A pointer to struct nvidia_p2p_dma_mapping.
 *	The DMA mapping containing the DMA addresses to use.
 * @param[in]	direction
 *	DMA direction.
 *
 * @return
 *	0 upon successful completion.
 *	Negative number if any error occurs.
 */
int nvidia_p2p_map_pages(struct device *dev,
		struct nvidia_p2p_page_table *page_table,
		struct nvidia_p2p_dma_mapping **map,
		enum dma_data_direction direction);

/*
 * @brief
 *	Unmap the pages previously mapped using nvidia_p2p_map_pages.
 *
 * @param[in]	*map
 *	A pointer to struct nvidia_p2p_dma_mapping.
 *	The DMA mapping containing the DMA addresses to use.
 *
 * @return
 *	0 upon successful completion.
 *	Negative number if any error occurs.
 */
int nvidia_p2p_unmap_pages(struct nvidia_p2p_dma_mapping *map);

/*
 * @brief
 *	Unmap the pages previously mapped using nvidia_p2p_map_pages.
 *	This is called during the execution of the free_callback().
 *
 * @param[in]	*dma_mapping
 *	A pointer to struct nvidia_p2p_dma_mapping.
 *	The DMA mapping containing the DMA addresses to use.
 *
 * @return
 *	0 upon successful completion.
 *	Negative number if any error occurs.
 */
int nvidia_p2p_free_dma_mapping(struct nvidia_p2p_dma_mapping *dma_mapping);

#endif
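
The header above only declares the interface. The following is a minimal, hypothetical sketch of how an external kernel module might drive it once nv-p2p.h is available under include/linux; the device pointer, GPU virtual address, size, and all my_peer_* names are illustrative assumptions and are not part of this commit.

/*
 * Hypothetical usage sketch (not part of this commit). All my_peer_*
 * names, and the origin of dev/gpu_vaddr/size, are assumptions.
 */
#include <linux/dma-mapping.h>
#include <linux/nv-p2p.h>

struct my_peer_ctx {
	struct nvidia_p2p_page_table *page_table;
	struct nvidia_p2p_dma_mapping *map;
};

/* Called by the GPU driver if the pinned pages are freed implicitly. */
static void my_peer_free_callback(void *data)
{
	struct my_peer_ctx *ctx = data;

	/* Inside the callback, the free_* variants perform the teardown. */
	nvidia_p2p_free_dma_mapping(ctx->map);
	nvidia_p2p_free_page_table(ctx->page_table);
}

static int my_peer_pin_and_map(struct device *dev, u64 gpu_vaddr, u64 size,
			       struct my_peer_ctx *ctx)
{
	int ret;

	/* Pin the GPU pages backing [gpu_vaddr, gpu_vaddr + size). */
	ret = nvidia_p2p_get_pages(gpu_vaddr, size, &ctx->page_table,
				   my_peer_free_callback, ctx);
	if (ret)
		return ret;

	/* Obtain DMA addresses the peer device can use. */
	ret = nvidia_p2p_map_pages(dev, ctx->page_table, &ctx->map,
				   DMA_BIDIRECTIONAL);
	if (ret) {
		nvidia_p2p_put_pages(ctx->page_table);
		return ret;
	}

	/*
	 * ctx->map->entries ranges are now described by
	 * ctx->map->hw_address[i] / ctx->map->hw_len[i]; program them
	 * into the peer device as appropriate.
	 */
	return 0;
}

/* Normal teardown path (not invoked from the free callback). */
static void my_peer_unmap_and_unpin(struct my_peer_ctx *ctx)
{
	nvidia_p2p_unmap_pages(ctx->map);
	nvidia_p2p_put_pages(ctx->page_table);
}

The sketch assumes the usual convention for such callbacks: once free_callback() has run and released the page table and DMA mapping, the normal unmap/put path must not be applied to the same objects again.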