Diffstat (limited to 'drivers/gpu/nvgpu/os/linux/ioctl_as.c')
 drivers/gpu/nvgpu/os/linux/ioctl_as.c | 197 +++++++++++++++++++++++++++++++++
 1 file changed, 197 insertions(+)
diff --git a/drivers/gpu/nvgpu/os/linux/ioctl_as.c b/drivers/gpu/nvgpu/os/linux/ioctl_as.c
index f0cec178..9708ea1a 100644
--- a/drivers/gpu/nvgpu/os/linux/ioctl_as.c
+++ b/drivers/gpu/nvgpu/os/linux/ioctl_as.c
@@ -32,6 +32,9 @@
 #include "platform_gk20a.h"
 #include "ioctl_as.h"
 #include "os_linux.h"
+#include <linux/nvmap.h> // For nvmap_{de,re}alloc_dmabuf()
+#include "dmabuf.h" // struct dma_buf things for swapping
+#include "swap.h"
 
 static u32 gk20a_as_translate_as_alloc_space_flags(struct gk20a *g, u32 flags)
 {
@@ -329,6 +332,192 @@ int gk20a_as_dev_release(struct inode *inode, struct file *filp)
 
 	return gk20a_as_release_share(as_share);
 }
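+// Set to 1 to select the old O(n) walk of vm->mapped_buffers below, rather
+// than the direct dmabuf_to_mapped_buf() lookup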
+#define OLD_WALK 0
+
+/* Access the dmabuf associated with the passed file descriptor, copy its
+ * backing pages to an NVMe drive, unpin those pages from DMA'able space,
+ * and free them for use by others.
+ * The dmabuf is left in a deallocated state, and any GPU mappings will be
+ * invalidated. To restore the dmabuf, see nvgpu_as_ioctl_read_swap_buffer().
+ */
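+/* A minimal userspace usage sketch (illustration only, not part of this
+ * patch; as_fd and buf_fd are hypothetical descriptors, error handling
+ * omitted):
+ *
+ *	struct nvgpu_as_swap_buffer_args args = { .dmabuf_fd = buf_fd };
+ *
+ *	ioctl(as_fd, NVGPU_AS_IOCTL_WRITE_SWAP_BUFFER, &args); // swap out
+ *	// ... buffer contents now live on the swap device ...
+ *	ioctl(as_fd, NVGPU_AS_IOCTL_READ_SWAP_BUFFER, &args);  // swap back in
+ */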
+static int nvgpu_as_ioctl_write_swap_buffer(
+		struct gk20a_as_share *as_share,
+		struct nvgpu_as_swap_buffer_args *args)
+{
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+	int err = 0;
+#if OLD_WALK
+	struct nvgpu_rbtree_node *node;
+#endif
+	struct nvgpu_mapped_buf *m;
+	struct sg_table *sgt;
+	struct vm_gk20a *vm = as_share->vm;
+	struct dma_buf *dmabuf = dma_buf_get(args->dmabuf_fd);
+
+	nvgpu_log_fn(g, " ");
+
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	// Other code walking vm->mapped_buffers grabs this lock
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+#if OLD_WALK
+	// Get the mapped buffer corresponding to this dmabuf
+	// TODO: Error on a buffer mapped more than once
+	for_each_buffer(node, vm->mapped_buffers, m) {
+		if (m->os_priv.dmabuf == dmabuf)
+			break;
+	}
+	// If the search failed
+	if (!node || !m) {
+		// No mapped dmabuf is associated with the FD
+		err = -EBADFD;
+		goto out_put_unlock;
+	}
+#else
+	m = dmabuf_to_mapped_buf(dmabuf);
+	// If the lookup failed
+	if (IS_ERR(m)) {
+		// No mapped dmabuf is associated with the FD
+		err = -EBADFD;
+		goto out_put_unlock;
+	}
+#endif
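+	/* dmabuf_to_mapped_buf() (from dmabuf.h in this series) is assumed to
+	 * resolve a dma_buf to its nvgpu_mapped_buf directly, e.g. via
+	 * per-buffer private data, replacing the O(n) walk above. Like the
+	 * walk, it yields only one mapping per dmabuf.
+	 */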
+
+	// Disable an annoying custom out-of-tree dma_buf "feature" which
+	// defers unmaps
+	if (dma_buf_disable_lazy_unmapping(dev_from_vm(vm))) {
+		err = -ENOTRECOVERABLE;
+		goto out_put_unlock;
+	}
+
+	// Flush dirty GPU L2 cache lines to DRAM
+	// (assuming that NVMe accesses to DRAM are uncached)
+	gk20a_mm_l2_flush(g, false);
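+	// (the second argument selects flush-only; 'true' would also
+	// invalidate the L2 contents, which is not needed on this path)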
+
+	// Copy out (blocking)
+	err = copy_out(m->os_priv.sgt);
+	if (err) {
+		// Inaccessible swap device, etc.
+		goto out_put_unlock;
+	}
+
+	// Unpinning needs to happen after the copy out is done
+	// (no return value check, as it's a void function)
+	gk20a_mm_unpin(dev_from_vm(vm), m->os_priv.dmabuf,
+		       m->os_priv.attachment, m->os_priv.sgt);
+
+	// Deallocate the dmabuf's backing pages
+	// TODO: Fail early in these cases (where the dmabuf is mmapped, etc.),
+	// before we do all the above (expensive) steps
+	err = nvmap_dealloc_dmabuf(dmabuf);
+	if (err) {
+		// Repin
+		sgt = gk20a_mm_pin(dev_from_vm(vm), m->os_priv.dmabuf,
+				   &m->os_priv.attachment);
+		if (IS_ERR(sgt)) {
+			printk(KERN_ERR "nvgpu: Error %ld while repinning dmabuf after an error in nvmap_dealloc_dmabuf()! Consider dmabuf FD %d to be in an inconsistent state!\n", PTR_ERR(sgt), args->dmabuf_fd);
+			err = PTR_ERR(sgt);
+			goto out_put_unlock;
+		}
+		m->os_priv.sgt = sgt;
+		goto out_put_unlock;
+	}
+
+out_put_unlock:
+	// Done with the dmabuf, so release our ref to it
+	dma_buf_put(dmabuf);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+	return err;
+}
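+/* For reference: copy_out()/copy_in() come from swap.h, which is not part of
+ * this diff. A sketch of the assumed interface, not the actual declarations:
+ *
+ *	// Synchronously write every page referenced by the scatter-gather
+ *	// table out to the swap device
+ *	int copy_out(struct sg_table *sgt);
+ *	// Synchronously read those pages back from the swap device
+ *	int copy_in(struct sg_table *sgt);
+ */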
+
+// Undoes everything nvgpu_as_ioctl_write_swap_buffer() does
+static int nvgpu_as_ioctl_read_swap_buffer(
+		struct gk20a_as_share *as_share,
+		struct nvgpu_as_swap_buffer_args *args)
+{
+	struct gk20a *g = gk20a_from_vm(as_share->vm);
+	int err = 0;
+#if OLD_WALK
+	struct nvgpu_rbtree_node *node;
+#endif
+	struct nvgpu_mapped_buf *m;
+	struct sg_table *sgt;
+	struct vm_gk20a *vm = as_share->vm;
+	struct dma_buf *dmabuf = dma_buf_get(args->dmabuf_fd);
+
+	nvgpu_log_fn(g, " ");
+
+	// dma_buf_get() returns an ERR_PTR, never NULL, on failure
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	// Other code walking vm->mapped_buffers grabs this lock
+	nvgpu_mutex_acquire(&vm->update_gmmu_lock);
+
+#if OLD_WALK
+	// Get the mapped buffer corresponding to this dmabuf
+	// TODO: Error on a buffer mapped more than once
+	for_each_buffer(node, vm->mapped_buffers, m) {
+		if (m->os_priv.dmabuf == dmabuf)
+			break;
+	}
+	// If the search failed
+	if (!node || !m) {
+		// No mapped dmabuf is associated with the FD
+		err = -EBADFD;
+		goto out_put_unlock;
+	}
+#else
+	m = dmabuf_to_mapped_buf(dmabuf);
+	// If the lookup failed
+	if (IS_ERR(m)) {
+		// No mapped dmabuf is associated with the FD
+		err = -EBADFD;
+		goto out_put_unlock;
+	}
+#endif
+
+	// Reallocate space for this buffer
+	err = nvmap_realloc_dmabuf(dmabuf);
+	if (err) {
+		// Out of memory (?)
+		goto out_put_unlock;
+	}
+
+	// Repin the buffer to DMA'able memory
+	sgt = gk20a_mm_pin(dev_from_vm(vm), m->os_priv.dmabuf,
+			   &m->os_priv.attachment);
+	if (IS_ERR(sgt)) {
+		// Roll back the allocation
+		err = nvmap_dealloc_dmabuf(dmabuf);
+		if (err)
+			printk(KERN_ERR "nvgpu: Error %d while rolling back dmabuf allocation state on error in gk20a_mm_pin()! Consider dmabuf FD %d to be in an inconsistent state!\n", err, args->dmabuf_fd);
+		err = PTR_ERR(sgt);
+		goto out_put_unlock;
+	}
+	// Do any bookkeeping not done by gk20a_mm_pin()
+	m->os_priv.sgt = sgt;
+
+	// Reload the page contents from disk (blocking)
+	err = copy_in(sgt);
+	if (err) {
+		int err2;
+		// Roll back the pinning and the allocation
+		gk20a_mm_unpin(dev_from_vm(vm), m->os_priv.dmabuf,
+			       m->os_priv.attachment, m->os_priv.sgt);
+		err2 = nvmap_dealloc_dmabuf(dmabuf);
+		if (err2)
+			printk(KERN_ERR "nvgpu: Error %d while rolling back dmabuf allocation state on error in copy_in()! Consider dmabuf FD %d to be in an inconsistent state!\n", err2, args->dmabuf_fd);
+		// Inaccessible swap device, etc.
+		goto out_put_unlock;
+	}
+	// Update the GPU page tables (PTs) to point at the new allocation
+	nvgpu_vm_remap(m);
+	// Due to the PT update, the translation lookaside buffer needs clearing
+	g->ops.fb.tlb_invalidate(g, vm->pdb.mem);
+	// Invalidate L2 so that a TLB refill does not load stale PT entries
+	gk20a_mm_l2_flush(g, true);
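+	// The ordering above matters: rewrite the PTs first, then drop any
+	// stale translations from the TLB, then flush+invalidate L2 so that
+	// subsequent TLB refills read the updated PTs rather than stale
+	// cached copies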
+
+out_put_unlock:
+	// Done with the dmabuf, so release our ref to it
+	dma_buf_put(dmabuf);
+	nvgpu_mutex_release(&vm->update_gmmu_lock);
+	return err;
+}
 
 long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
@@ -412,6 +601,14 @@ long gk20a_as_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		err = nvgpu_as_ioctl_get_sync_ro_map(as_share,
 			(struct nvgpu_as_get_sync_ro_map_args *)buf);
 		break;
+	case NVGPU_AS_IOCTL_READ_SWAP_BUFFER:
+		err = nvgpu_as_ioctl_read_swap_buffer(as_share,
+			(struct nvgpu_as_swap_buffer_args *)buf);
+		break;
+	case NVGPU_AS_IOCTL_WRITE_SWAP_BUFFER:
+		err = nvgpu_as_ioctl_write_swap_buffer(as_share,
+			(struct nvgpu_as_swap_buffer_args *)buf);
+		break;
 	default:
 		err = -ENOTTY;
 		break;