author		Chunming Zhou <david1.zhou@amd.com>	2015-10-16 02:06:19 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2015-10-21 11:35:14 -0400
commit		f48b2659f521301753f9b3b67e308a79c6110346 (patch)
tree		a6f1fff1739dbfa305453febc26f39df2e4232f0 /drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
parent		ce16b0e5a32a157abd6446214e8b91c55064204e (diff)
drm/amdgpu: fix the broken vm->mutex V2
Fix the conflicts between vm->mutex and the ww_mutex:
vm->mutex is always taken first, then the ww_mutex.
V2: remove the unnecessary check for the page table BO.
Change-Id: Iea56e183752c02831126d06d2f5b7a474a6e4743
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
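
For context, the ordering rule the patch enforces can be sketched as below. This is an illustrative, hypothetical snippet, not code from the patch: example_open and its body are made up for illustration, and only mutex_lock/mutex_unlock and amdgpu_bo_reserve/amdgpu_bo_unreserve (which acquires/releases the BO's ww_mutex internally) are real calls. The point is that the per-VM mutex is taken before the BO's ww_mutex and released on every exit path, including the reserve-failure path:

static int example_open(struct amdgpu_vm *vm, struct amdgpu_bo *rbo)
{
	int r;

	mutex_lock(&vm->mutex);			/* 1: vm->mutex first */
	r = amdgpu_bo_reserve(rbo, false);	/* 2: then the BO's ww_mutex */
	if (r) {
		mutex_unlock(&vm->mutex);	/* unlock on the error path too */
		return r;
	}

	/* ... work on the per-VM state of rbo ... */

	amdgpu_bo_unreserve(rbo);		/* release in reverse order */
	mutex_unlock(&vm->mutex);
	return 0;
}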
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d81ab785368a..087332858853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -115,9 +115,10 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-
+	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, false);
 	if (r) {
+		mutex_unlock(&vm->mutex);
 		return r;
 	}
 
@@ -128,7 +129,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
 		++bo_va->ref_count;
 	}
 	amdgpu_bo_unreserve(rbo);
-
+	mutex_unlock(&vm->mutex);
 	return 0;
 }
 
@@ -141,9 +142,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
 	int r;
-
+	mutex_lock(&vm->mutex);
 	r = amdgpu_bo_reserve(rbo, true);
 	if (r) {
+		mutex_unlock(&vm->mutex);
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
 		return;
@@ -155,6 +157,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 		}
 	}
 	amdgpu_bo_unreserve(rbo);
+	mutex_unlock(&vm->mutex);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -481,18 +484,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 		goto error_unreserve;
 	}
 
-	mutex_lock(&bo_va->vm->mutex);
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	if (r)
-		goto error_unlock;
-
+		goto error_unreserve;
 
 	if (operation == AMDGPU_VA_OP_MAP)
 		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
 
-error_unlock:
-	mutex_unlock(&bo_va->vm->mutex);
-
 error_unreserve:
 	ttm_eu_backoff_reservation(&ticket, &list);
 
@@ -549,10 +547,11 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -ENOENT;
-
+	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
 	r = amdgpu_bo_reserve(rbo, false);
 	if (r) {
+		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
@@ -560,6 +559,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
 		amdgpu_bo_unreserve(rbo);
+		mutex_unlock(&fpriv->vm.mutex);
 		return -ENOENT;
 	}
 
@@ -584,7 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-
+	mutex_unlock(&fpriv->vm.mutex);
 	drm_gem_object_unreference_unlocked(gobj);
 	return r;
 }
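
A side effect visible in the amdgpu_gem_va_update_vm hunk: once the caller (the va ioctl) holds vm->mutex, the function no longer needs its own error_unlock label, and the unwind collapses to the single error_unreserve label. A hedged before/after sketch of that goto-unwind pattern, with hypothetical helper names (example_ctx, reserve_buffers, do_update, backoff_reservation stand in for the real calls):

/* Before (sketch): the function locked vm->mutex itself,
 * so failures after the lock needed a separate unwind label. */
static void update_vm_before(struct example_ctx *ctx)
{
	int r;

	r = reserve_buffers(ctx);	/* ww_mutex side */
	if (r)
		goto error_unreserve;

	mutex_lock(&ctx->vm_mutex);
	r = do_update(ctx);
	if (r)
		goto error_unlock;	/* must undo the lock separately */

error_unlock:				/* success also falls through here */
	mutex_unlock(&ctx->vm_mutex);
error_unreserve:
	backoff_reservation(ctx);
}

/* After (sketch): the caller already holds vm->mutex,
 * so one cleanup label covers every path. */
static void update_vm_after(struct example_ctx *ctx)
{
	int r;

	r = reserve_buffers(ctx);
	if (r)
		goto error_unreserve;

	r = do_update(ctx);
	/* success and failure both reach the single label */
error_unreserve:
	backoff_reservation(ctx);
}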