author     Christian König <christian.koenig@amd.com>    2015-06-08 09:03:00 -0400
committer  Alex Deucher <alexander.deucher@amd.com>      2015-06-08 21:42:53 -0400
commit     34b5f6a6d6d0e482c7ce498f60bce261e533821e (patch)
tree       04d5de2c0fff82edf6aeb6492148689bad8b76eb /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
parent     6c7fc503a47f9b87dfd7c76e34f808ab6870a82f (diff)
drm/amdgpu: cleanup VA IOCTL
Remove the unnecessary returned status and make the IOCTL write only.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r--   drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c   64
1 file changed, 21 insertions(+), 43 deletions(-)
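
The patch replaces the union-based argument (separate `in` request and `out.result` status halves) with a single, write-only request structure. For orientation, here is a rough sketch of the argument shape after this change, reconstructed only from the fields the diff touches (handle, operation, flags, va_address, offset_in_bo, map_size); the authoritative definition lives in the amdgpu uapi header (amdgpu_drm.h) and may use different types or contain padding/reserved members not shown:

/* Sketch of the write-only VA request after this cleanup; field order,
 * types and any padding members are assumptions, see amdgpu_drm.h for
 * the real definition. */
struct drm_amdgpu_gem_va {
	__u32 handle;        /* GEM handle of the BO to (un)map */
	__u32 operation;     /* AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP */
	__u32 flags;         /* AMDGPU_VM_PAGE_READABLE/WRITEABLE/EXECUTABLE */
	__u64 va_address;    /* GPU virtual address, outside the reserved area */
	__u64 offset_in_bo;  /* offset of the mapping inside the BO */
	__u64 map_size;      /* size of the mapping */
};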
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e8409fea4bf1..0ec222295fee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -505,7 +505,7 @@ error_free:
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 			  struct drm_file *filp)
 {
-	union drm_amdgpu_gem_va *args = data;
+	struct drm_amdgpu_gem_va *args = data;
 	struct drm_gem_object *gobj;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
@@ -514,95 +514,73 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
-	if (!adev->vm_manager.enabled) {
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
+	if (!adev->vm_manager.enabled)
 		return -ENOTTY;
-	}
 
-	if (args->in.va_address < AMDGPU_VA_RESERVED_SIZE) {
+	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
 		dev_err(&dev->pdev->dev,
 			"va_address 0x%lX is in reserved area 0x%X\n",
-			(unsigned long)args->in.va_address,
+			(unsigned long)args->va_address,
 			AMDGPU_VA_RESERVED_SIZE);
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
 		return -EINVAL;
 	}
 
 	invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
 		AMDGPU_VM_PAGE_EXECUTABLE);
-	if ((args->in.flags & invalid_flags)) {
+	if ((args->flags & invalid_flags)) {
 		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
-			args->in.flags, invalid_flags);
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
+			args->flags, invalid_flags);
 		return -EINVAL;
 	}
 
-	switch (args->in.operation) {
+	switch (args->operation) {
 	case AMDGPU_VA_OP_MAP:
 	case AMDGPU_VA_OP_UNMAP:
 		break;
 	default:
 		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
-			args->in.operation);
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
+			args->operation);
 		return -EINVAL;
 	}
 
-	gobj = drm_gem_object_lookup(dev, filp, args->in.handle);
-	if (gobj == NULL) {
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
+	gobj = drm_gem_object_lookup(dev, filp, args->handle);
+	if (gobj == NULL)
 		return -ENOENT;
-	}
+
 	rbo = gem_to_amdgpu_bo(gobj);
 	r = amdgpu_bo_reserve(rbo, false);
 	if (r) {
-		if (r != -ERESTARTSYS) {
-			memset(args, 0, sizeof(*args));
-			args->out.result = AMDGPU_VA_RESULT_ERROR;
-		}
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
+
 	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
-		drm_gem_object_unreference_unlocked(gobj);
+		amdgpu_bo_unreserve(rbo);
 		return -ENOENT;
 	}
 
-	switch (args->in.operation) {
+	switch (args->operation) {
 	case AMDGPU_VA_OP_MAP:
-		if (args->in.flags & AMDGPU_VM_PAGE_READABLE)
+		if (args->flags & AMDGPU_VM_PAGE_READABLE)
 			va_flags |= AMDGPU_PTE_READABLE;
-		if (args->in.flags & AMDGPU_VM_PAGE_WRITEABLE)
+		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
 			va_flags |= AMDGPU_PTE_WRITEABLE;
-		if (args->in.flags & AMDGPU_VM_PAGE_EXECUTABLE)
+		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
 			va_flags |= AMDGPU_PTE_EXECUTABLE;
-		r = amdgpu_vm_bo_map(adev, bo_va, args->in.va_address,
-				     args->in.offset_in_bo, args->in.map_size,
+		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
 				     va_flags);
 		break;
 	case AMDGPU_VA_OP_UNMAP:
-		r = amdgpu_vm_bo_unmap(adev, bo_va, args->in.va_address);
+		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
 		break;
 	default:
 		break;
 	}
 
-	if (!r) {
+	if (!r)
 		amdgpu_gem_va_update_vm(adev, bo_va);
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_OK;
-	} else {
-		memset(args, 0, sizeof(*args));
-		args->out.result = AMDGPU_VA_RESULT_ERROR;
-	}
 
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
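
With the result word gone, userspace reads nothing back from this IOCTL; success or failure is conveyed purely through the ioctl return value. A hypothetical caller sketch (the function name is illustrative and the use of libdrm's drmCommandWrite helper and these include paths are assumptions, not taken from this patch):

/* Hypothetical userspace sketch: map a BO into the GPU VM through the now
 * write-only GEM_VA IOCTL. Real userspace (libdrm) may wrap this differently. */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int example_gem_va_map(int fd, uint32_t handle, uint64_t gpu_va,
			      uint64_t offset, uint64_t size)
{
	struct drm_amdgpu_gem_va va;

	memset(&va, 0, sizeof(va));
	va.handle = handle;
	va.operation = AMDGPU_VA_OP_MAP;
	va.flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE;
	va.va_address = gpu_va;
	va.offset_in_bo = offset;
	va.map_size = size;

	/* No out.result to inspect any more: 0 means the mapping request was
	 * accepted, a negative errno means it was rejected. */
	return drmCommandWrite(fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));
}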