author     Alex Xie <AlexBin.Xie@amd.com>    2017-04-26 13:31:01 -0400
committer  Alex Deucher <alexander.deucher@amd.com>    2017-04-28 17:33:09 -0400
commit     cca7ecb32b5920f05bb940cbe01dde19c0125620
tree       341c358bc0e549080c5a3ea775e9907d2ed4ab56    /drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
parent     a6bef67e2abae4f19de8fb4ec34acb1df8156a75
drm/amdgpu: Fix use of interruptible waiting
Either in the cgs functions or for callers of the cgs functions:
1. A signal interrupt can affect the expected behaviour.
2. There is no good mechanism to handle the corresponding error.
3. There is no chance of deadlock in these single-BO waits.
4. There is no clear benefit from interruptible waiting.
5. Future callers of these functions might have the same issue.

Signed-off-by: Alex Xie <AlexBin.Xie@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
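As a reader's note on the points above: the second argument of amdgpu_bo_reserve() is no_intr. Passing false requests an interruptible wait on the BO reservation, so a pending signal can make the reserve fail with -ERESTARTSYS; the CGS helpers then simply return that error, leaving the BO mapped or pinned, and the caller has no sane way to restart the operation. Passing true waits uninterruptibly, which is acceptable here because only a single BO is reserved and no deadlock is possible. A minimal sketch of the before/after caller pattern (the function names below are invented for illustration; the bodies mirror the CGS teardown paths touched by this patch):

/* Illustrative sketch only, not kernel source. */
static int cgs_teardown_interruptible(struct amdgpu_bo *obj)	/* before the patch */
{
	int r = amdgpu_bo_reserve(obj, false);	/* interruptible: may fail with -ERESTARTSYS */

	if (unlikely(r != 0))
		return r;	/* no way to restart; the BO stays mapped and pinned */
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return 0;
}

static int cgs_teardown_uninterruptible(struct amdgpu_bo *obj)	/* after the patch */
{
	int r = amdgpu_bo_reserve(obj, true);	/* no_intr: a signal does not abort the wait */

	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return 0;
}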
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 14
1 file changed, 7 insertions, 7 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 1c7e6c28f93a..013f5f14dd75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -89,7 +89,7 @@ static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
 			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
 	if (ret)
 		return ret;
-	ret = amdgpu_bo_reserve(bo, false);
+	ret = amdgpu_bo_reserve(bo, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -107,7 +107,7 @@ static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t km
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;
 
 	if (obj) {
-		int r = amdgpu_bo_reserve(obj, false);
+		int r = amdgpu_bo_reserve(obj, true);
 		if (likely(r == 0)) {
 			amdgpu_bo_unpin(obj);
 			amdgpu_bo_unreserve(obj);
@@ -215,7 +215,7 @@ static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 
 	if (obj) {
-		int r = amdgpu_bo_reserve(obj, false);
+		int r = amdgpu_bo_reserve(obj, true);
 		if (likely(r == 0)) {
 			amdgpu_bo_kunmap(obj);
 			amdgpu_bo_unpin(obj);
@@ -239,7 +239,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
 	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
 	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
 
-	r = amdgpu_bo_reserve(obj, false);
+	r = amdgpu_bo_reserve(obj, true);
 	if (unlikely(r != 0))
 		return r;
 	r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
@@ -252,7 +252,7 @@ static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t
 {
 	int r;
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
-	r = amdgpu_bo_reserve(obj, false);
+	r = amdgpu_bo_reserve(obj, true);
 	if (unlikely(r != 0))
 		return r;
 	r = amdgpu_bo_unpin(obj);
@@ -265,7 +265,7 @@ static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
 {
 	int r;
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
-	r = amdgpu_bo_reserve(obj, false);
+	r = amdgpu_bo_reserve(obj, true);
 	if (unlikely(r != 0))
 		return r;
 	r = amdgpu_bo_kmap(obj, map);
@@ -277,7 +277,7 @@ static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t
 {
 	int r;
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
-	r = amdgpu_bo_reserve(obj, false);
+	r = amdgpu_bo_reserve(obj, true);
 	if (unlikely(r != 0))
 		return r;
 	amdgpu_bo_kunmap(obj);
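For reference, the no_intr flag of this wrapper maps onto TTM's interruptible reservation. A rough, paraphrased sketch (not quoted from the tree; see the amdgpu object headers for the real helper):

/* Paraphrased sketch of the wrapper's behaviour, not the actual kernel code. */
static inline int amdgpu_bo_reserve_sketch(struct amdgpu_bo *bo, bool no_intr)
{
	/*
	 * no_intr == true  -> ttm_bo_reserve() waits uninterruptibly
	 * no_intr == false -> the wait can be broken by a signal and the
	 *                     reservation returns -ERESTARTSYS
	 */
	return ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
}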