author     Chunming Zhou <david1.zhou@amd.com>       2015-10-16 02:06:19 -0400
committer  Alex Deucher <alexander.deucher@amd.com>  2015-10-21 11:35:14 -0400
commit     f48b2659f521301753f9b3b67e308a79c6110346 (patch)
tree       a6f1fff1739dbfa305453febc26f39df2e4232f0 /drivers
parent     ce16b0e5a32a157abd6446214e8b91c55064204e (diff)
drm/amdgpu: fix the broken vm->mutex V2
fix the vm->mutex and ww_mutex conflicts. vm->mutex is always taken
first, then ww_mutex.

V2: remove unnecessary checking for pt bo.

Change-Id: Iea56e183752c02831126d06d2f5b7a474a6e4743
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
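The core of the fix is a lock-ordering rule: the per-VM mutex is taken before
any buffer reservation (the ww_mutex embedded in the BO's reservation object)
and released only after the reservation has been backed off. The sketch below
is an editorial illustration of that ordering, not part of the patch;
example_vm_op is a hypothetical helper, while amdgpu_bo_reserve() and
amdgpu_bo_unreserve() are the real reservation wrappers seen in the diff.

/*
 * Minimal sketch of the locking order this patch enforces.
 * Hypothetical helper for illustration only.
 */
static int example_vm_op(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
        int r;

        mutex_lock(&vm->mutex);                 /* outer: per-VM mutex first */

        r = amdgpu_bo_reserve(bo, false);       /* inner: BO ww_mutex */
        if (r) {
                mutex_unlock(&vm->mutex);       /* unwind in reverse order */
                return r;
        }

        /* ... mapping and page table updates run under both locks ... */

        amdgpu_bo_unreserve(bo);                /* drop inner lock first */
        mutex_unlock(&vm->mutex);               /* outer lock released last */
        return 0;
}

Because vm->mutex is now held across the whole ioctl instead of being dropped
around the page table allocation, no one else can allocate a page table
concurrently; that is why the "someone else allocated the pt in the meantime"
re-check in amdgpu_vm_bo_map() becomes unnecessary (the V2 note above).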
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c    8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   35
3 files changed, 19 insertions, 46 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 5fdc0394561e..8727c3099554 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -608,7 +608,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                 }
         }
 
-        mutex_lock(&vm->mutex);
         r = amdgpu_bo_vm_update_pte(parser, vm);
         if (r) {
                 goto out;
@@ -619,7 +618,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                                parser->filp);
 
 out:
-        mutex_unlock(&vm->mutex);
         return r;
 }
 
@@ -827,6 +825,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
         struct amdgpu_device *adev = dev->dev_private;
         union drm_amdgpu_cs *cs = data;
+        struct amdgpu_fpriv *fpriv = filp->driver_priv;
+        struct amdgpu_vm *vm = &fpriv->vm;
         struct amdgpu_cs_parser *parser;
         bool reserved_buffers = false;
         int i, r;
@@ -844,7 +844,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 r = amdgpu_cs_handle_lockup(adev, r);
                 return r;
         }
-
+        mutex_lock(&vm->mutex);
         r = amdgpu_cs_parser_relocs(parser);
         if (r == -ENOMEM)
                 DRM_ERROR("Not enough memory for command submission!\n");
@@ -911,12 +911,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
                 mutex_unlock(&job->job_lock);
                 amdgpu_cs_parser_fini_late(parser);
+                mutex_unlock(&vm->mutex);
                 return 0;
         }
 
         cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
         amdgpu_cs_parser_fini(parser, r, reserved_buffers);
+        mutex_unlock(&vm->mutex);
         r = amdgpu_cs_handle_lockup(adev, r);
         return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d81ab785368a..087332858853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -115,9 +115,10 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
         struct amdgpu_vm *vm = &fpriv->vm;
         struct amdgpu_bo_va *bo_va;
         int r;
-
+        mutex_lock(&vm->mutex);
         r = amdgpu_bo_reserve(rbo, false);
         if (r) {
+                mutex_unlock(&vm->mutex);
                 return r;
         }
 
@@ -128,7 +129,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
                 ++bo_va->ref_count;
         }
         amdgpu_bo_unreserve(rbo);
-
+        mutex_unlock(&vm->mutex);
         return 0;
 }
 
@@ -141,9 +142,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
         struct amdgpu_vm *vm = &fpriv->vm;
         struct amdgpu_bo_va *bo_va;
         int r;
-
+        mutex_lock(&vm->mutex);
         r = amdgpu_bo_reserve(rbo, true);
         if (r) {
+                mutex_unlock(&vm->mutex);
                 dev_err(adev->dev, "leaking bo va because "
                         "we fail to reserve bo (%d)\n", r);
                 return;
@@ -155,6 +157,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
                 }
         }
         amdgpu_bo_unreserve(rbo);
+        mutex_unlock(&vm->mutex);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -481,18 +484,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
                 goto error_unreserve;
         }
 
-        mutex_lock(&bo_va->vm->mutex);
         r = amdgpu_vm_clear_freed(adev, bo_va->vm);
         if (r)
-                goto error_unlock;
-
+                goto error_unreserve;
 
         if (operation == AMDGPU_VA_OP_MAP)
                 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
 
-error_unlock:
-        mutex_unlock(&bo_va->vm->mutex);
-
 error_unreserve:
         ttm_eu_backoff_reservation(&ticket, &list);
 
@@ -549,10 +547,11 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
         gobj = drm_gem_object_lookup(dev, filp, args->handle);
         if (gobj == NULL)
                 return -ENOENT;
-
+        mutex_lock(&fpriv->vm.mutex);
         rbo = gem_to_amdgpu_bo(gobj);
         r = amdgpu_bo_reserve(rbo, false);
         if (r) {
+                mutex_unlock(&fpriv->vm.mutex);
                 drm_gem_object_unreference_unlocked(gobj);
                 return r;
         }
@@ -560,6 +559,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
         bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
         if (!bo_va) {
                 amdgpu_bo_unreserve(rbo);
+                mutex_unlock(&fpriv->vm.mutex);
                 return -ENOENT;
         }
 
@@ -584,7 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
         if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
                 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-
+        mutex_unlock(&fpriv->vm.mutex);
         drm_gem_object_unreference_unlocked(gobj);
         return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0675524eb9de..06e207fd007b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -90,11 +90,9 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
         struct amdgpu_bo_list_entry *list;
         unsigned i, idx;
 
-        mutex_lock(&vm->mutex);
         list = drm_malloc_ab(vm->max_pde_used + 2,
                              sizeof(struct amdgpu_bo_list_entry));
         if (!list) {
-                mutex_unlock(&vm->mutex);
                 return NULL;
         }
 
@@ -119,7 +117,6 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
                 list[idx].tv.shared = true;
                 list_add(&list[idx++].tv.head, head);
         }
-        mutex_unlock(&vm->mutex);
 
         return list;
 }
@@ -970,9 +967,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
         INIT_LIST_HEAD(&bo_va->invalids);
         INIT_LIST_HEAD(&bo_va->vm_status);
 
-        mutex_lock(&vm->mutex);
         list_add_tail(&bo_va->bo_list, &bo->va);
-        mutex_unlock(&vm->mutex);
 
         return bo_va;
 }
@@ -1025,8 +1020,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                 return -EINVAL;
         }
 
-        mutex_lock(&vm->mutex);
-
         saddr /= AMDGPU_GPU_PAGE_SIZE;
         eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
@@ -1040,14 +1033,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                         tmp->it.start, tmp->it.last + 1);
                 amdgpu_bo_unreserve(bo_va->bo);
                 r = -EINVAL;
-                goto error_unlock;
+                goto error;
         }
 
         mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
         if (!mapping) {
                 amdgpu_bo_unreserve(bo_va->bo);
                 r = -ENOMEM;
-                goto error_unlock;
+                goto error;
         }
 
         INIT_LIST_HEAD(&mapping->list);
@@ -1079,9 +1072,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                 if (vm->page_tables[pt_idx].bo)
                         continue;
 
-                /* drop mutex to allocate and clear page table */
-                mutex_unlock(&vm->mutex);
-
                 ww_mutex_lock(&resv->lock, NULL);
                 r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                                      AMDGPU_GPU_PAGE_SIZE, true,
@@ -1098,32 +1088,19 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
                         goto error_free;
                 }
 
-                /* aquire mutex again */
-                mutex_lock(&vm->mutex);
-                if (vm->page_tables[pt_idx].bo) {
-                        /* someone else allocated the pt in the meantime */
-                        mutex_unlock(&vm->mutex);
-                        amdgpu_bo_unref(&pt);
-                        mutex_lock(&vm->mutex);
-                        continue;
-                }
-
                 vm->page_tables[pt_idx].addr = 0;
                 vm->page_tables[pt_idx].bo = pt;
         }
 
-        mutex_unlock(&vm->mutex);
         return 0;
 
 error_free:
-        mutex_lock(&vm->mutex);
         list_del(&mapping->list);
         interval_tree_remove(&mapping->it, &vm->va);
         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
         kfree(mapping);
 
-error_unlock:
-        mutex_unlock(&vm->mutex);
+error:
         return r;
 }
 
@@ -1168,7 +1145,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                 }
         }
 
-        mutex_lock(&vm->mutex);
         list_del(&mapping->list);
         interval_tree_remove(&mapping->it, &vm->va);
         trace_amdgpu_vm_bo_unmap(bo_va, mapping);
@@ -1177,7 +1153,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                 list_add(&mapping->list, &vm->freed);
         else
                 kfree(mapping);
-        mutex_unlock(&vm->mutex);
         amdgpu_bo_unreserve(bo_va->bo);
 
         return 0;
@@ -1201,8 +1176,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
         list_del(&bo_va->bo_list);
 
-        mutex_lock(&vm->mutex);
-
         spin_lock(&vm->status_lock);
         list_del(&bo_va->vm_status);
         spin_unlock(&vm->status_lock);
@@ -1221,8 +1194,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
         fence_put(bo_va->last_pt_update);
         kfree(bo_va);
-
-        mutex_unlock(&vm->mutex);
 }
 
 /**