summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/nvgpu/os/linux/vm.c
diff options
context:
space:
mode:
author    Alex Waterman <alexw@nvidia.com>  2018-06-05 15:53:16 -0400
committer mobile promotions <svcmobile_promotions@nvidia.com>  2018-06-22 13:04:16 -0400
commit    840e039d57d4acfb2be2a82c4b95a6d25c7aacd4 (patch)
tree      a4a358cf6bb5d005e8f6db159d1b3b03ddbf94df /drivers/gpu/nvgpu/os/linux/vm.c
parent    46666ed101847d9b87ea60cd432dea97afbef0b1 (diff)
gpu: nvgpu: Update Linux side VM code for API solidification
Update the Linux specific code to match the MM API docs in the previous patch. The user passed page size is plumbed through the Linux VM mapping calls but is ultimately ignored once the core VM code is called. This will be handled in the next patch. This also adds some code to make the CDE page size picking happen semi-intelligently. In many cases the CDE buffers can be mapped with large pages.

Bug 2011640

Change-Id: I20e78e7d5a841e410864b474179e71da1c2482f4
Signed-off-by: Alex Waterman <alexw@nvidia.com>
Reviewed-on: https://git-master.nvidia.com/r/1740610
GVS: Gerrit_Virtual_Submit
Reviewed-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/vm.c')
-rw-r--r--  drivers/gpu/nvgpu/os/linux/vm.c | 39
1 files changed, 29 insertions, 10 deletions
diff --git a/drivers/gpu/nvgpu/os/linux/vm.c b/drivers/gpu/nvgpu/os/linux/vm.c
index baa77515..eb9ca8fd 100644
--- a/drivers/gpu/nvgpu/os/linux/vm.c
+++ b/drivers/gpu/nvgpu/os/linux/vm.c
@@ -175,8 +175,9 @@ struct nvgpu_mapped_buf *nvgpu_vm_find_mapping(struct vm_gk20a *vm,
175 175
176int nvgpu_vm_map_linux(struct vm_gk20a *vm, 176int nvgpu_vm_map_linux(struct vm_gk20a *vm,
177 struct dma_buf *dmabuf, 177 struct dma_buf *dmabuf,
178 u64 offset_align, 178 u64 map_addr,
179 u32 flags, 179 u32 flags,
180 u32 page_size,
180 s16 compr_kind, 181 s16 compr_kind,
181 s16 incompr_kind, 182 s16 incompr_kind,
182 int rw_flag, 183 int rw_flag,
@@ -192,12 +193,8 @@ int nvgpu_vm_map_linux(struct vm_gk20a *vm,
192 struct nvgpu_sgt *nvgpu_sgt = NULL; 193 struct nvgpu_sgt *nvgpu_sgt = NULL;
193 struct nvgpu_mapped_buf *mapped_buffer = NULL; 194 struct nvgpu_mapped_buf *mapped_buffer = NULL;
194 struct dma_buf_attachment *attachment; 195 struct dma_buf_attachment *attachment;
195 u64 map_addr = 0ULL;
196 int err = 0; 196 int err = 0;
197 197
198 if (flags & NVGPU_VM_MAP_FIXED_OFFSET)
199 map_addr = offset_align;
200
201 sgt = gk20a_mm_pin(dev, dmabuf, &attachment); 198 sgt = gk20a_mm_pin(dev, dmabuf, &attachment);
202 if (IS_ERR(sgt)) { 199 if (IS_ERR(sgt)) {
203 nvgpu_warn(g, "Failed to pin dma_buf!"); 200 nvgpu_warn(g, "Failed to pin dma_buf!");
@@ -253,8 +250,9 @@ clean_up:
253 250
254int nvgpu_vm_map_buffer(struct vm_gk20a *vm, 251int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
255 int dmabuf_fd, 252 int dmabuf_fd,
256 u64 *offset_align, 253 u64 *map_addr,
257 u32 flags, /*NVGPU_AS_MAP_BUFFER_FLAGS_*/ 254 u32 flags, /*NVGPU_AS_MAP_BUFFER_FLAGS_*/
255 u32 page_size,
258 s16 compr_kind, 256 s16 compr_kind,
259 s16 incompr_kind, 257 s16 incompr_kind,
260 u64 buffer_offset, 258 u64 buffer_offset,
@@ -274,8 +272,28 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
274 return PTR_ERR(dmabuf); 272 return PTR_ERR(dmabuf);
275 } 273 }
276 274
275 /*
276 * For regular maps we do not accept either an input address or a
277 * buffer_offset.
278 */
279 if (!(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET) &&
280 (buffer_offset || *map_addr)) {
281 nvgpu_err(g,
282 "Regular map with addr/buf offset is not supported!");
283 return -EINVAL;
284 }
285
286 /*
287 * Map size is always buffer size for non fixed mappings. As such map
288 * size should be left as zero by userspace for non-fixed maps.
289 */
290 if (mapping_size && !(flags & NVGPU_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET)) {
291 nvgpu_err(g, "map_size && non-fixed-mapping!");
292 return -EINVAL;
293 }
294
277 /* verify that we're not overflowing the buffer, i.e. 295 /* verify that we're not overflowing the buffer, i.e.
278 * (buffer_offset + mapping_size)> dmabuf->size. 296 * (buffer_offset + mapping_size) > dmabuf->size.
279 * 297 *
280 * Since buffer_offset + mapping_size could overflow, first check 298 * Since buffer_offset + mapping_size could overflow, first check
281 * that mapping size < dmabuf_size, at which point we can subtract 299 * that mapping size < dmabuf_size, at which point we can subtract
@@ -284,7 +302,7 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
284 if ((mapping_size > dmabuf->size) || 302 if ((mapping_size > dmabuf->size) ||
285 (buffer_offset > (dmabuf->size - mapping_size))) { 303 (buffer_offset > (dmabuf->size - mapping_size))) {
286 nvgpu_err(g, 304 nvgpu_err(g,
287 "buf size %llx < (offset(%llx) + map_size(%llx))\n", 305 "buf size %llx < (offset(%llx) + map_size(%llx))",
288 (u64)dmabuf->size, buffer_offset, mapping_size); 306 (u64)dmabuf->size, buffer_offset, mapping_size);
289 dma_buf_put(dmabuf); 307 dma_buf_put(dmabuf);
290 return -EINVAL; 308 return -EINVAL;
@@ -296,8 +314,9 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
296 return err; 314 return err;
297 } 315 }
298 316
299 err = nvgpu_vm_map_linux(vm, dmabuf, *offset_align, 317 err = nvgpu_vm_map_linux(vm, dmabuf, *map_addr,
300 nvgpu_vm_translate_linux_flags(g, flags), 318 nvgpu_vm_translate_linux_flags(g, flags),
319 page_size,
301 compr_kind, incompr_kind, 320 compr_kind, incompr_kind,
302 gk20a_mem_flag_none, 321 gk20a_mem_flag_none,
303 buffer_offset, 322 buffer_offset,
@@ -306,7 +325,7 @@ int nvgpu_vm_map_buffer(struct vm_gk20a *vm,
306 &ret_va); 325 &ret_va);
307 326
308 if (!err) 327 if (!err)
309 *offset_align = ret_va; 328 *map_addr = ret_va;
310 else 329 else
311 dma_buf_put(dmabuf); 330 dma_buf_put(dmabuf);
312 331