author     Maarten Lankhorst <m.b.lankhorst@gmail.com>    2012-11-28 06:25:44 -0500
committer  Dave Airlie <airlied@redhat.com>               2012-12-10 05:21:30 -0500
commit     97a875cbdf89a4638eea57c2b456c7cc4e3e8b21
tree       286ff23a3f8e389ec4fa1a6f3b58cbee4fc8e9fe       /drivers/gpu/drm/radeon/radeon_ttm.c
parent     e7ab20197be3ee5fd75441e1cff0c7cdfea5bf1a
drm/ttm: remove no_wait_reserve, v3
All items on the lru list are always reservable, so this is a stupid
thing to keep. Not only that, it is used in a way which would
guarantee deadlocks if it were ever to be set to block on reserve.
This is a lot of churn, but mostly because the removed argument was
threaded through arbitrarily deep call chains in many places.
No code changes in this patch other than the removal of the
no_wait_reserve argument; the previous patch already removed all uses
of no_wait_reserve.
v2:
- Warn if -EBUSY is returned on reservation; all objects on the list
  should be reservable. Adjusted patch slightly due to conflicts.
v3:
- Focus on no_wait_reserve removal only.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
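
[Editor's note] The deadlock argument in the message above is easiest to
see outside the kernel. The following is a minimal user-space sketch, not
from this patch or from TTM: plain pthread mutexes stand in for
buffer-object reservations, and all names are hypothetical. Two movers
that block on each other's reservation deadlock in the classic ABBA
pattern, while a trylock-style reserve (the only semantics
no_wait_reserve could safely select) fails fast with -EBUSY so the caller
can back off and retry:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for two buffer-object reservations (hypothetical). */
static pthread_mutex_t reserve_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t reserve_b = PTHREAD_MUTEX_INITIALIZER;

/* Blocking reserve: if thread 1 runs move(&a, &b) while thread 2 runs
 * move(&b, &a), both block forever on the second lock (ABBA). */
static void move_blocking(pthread_mutex_t *held, pthread_mutex_t *other)
{
	pthread_mutex_lock(held);
	pthread_mutex_lock(other);	/* may never return */
	pthread_mutex_unlock(other);
	pthread_mutex_unlock(held);
}

/* Non-blocking reserve: the second acquisition can fail with EBUSY,
 * but it can never deadlock; the caller backs off and retries. */
static int move_trylock(pthread_mutex_t *held, pthread_mutex_t *other)
{
	int r;

	pthread_mutex_lock(held);
	r = pthread_mutex_trylock(other);
	if (r == 0)
		pthread_mutex_unlock(other);
	pthread_mutex_unlock(held);
	return r ? -EBUSY : 0;
}

int main(void)
{
	printf("A then B: %d\n", move_trylock(&reserve_a, &reserve_b));
	printf("B then A: %d\n", move_trylock(&reserve_b, &reserve_a));
	(void)move_blocking;	/* deadlock-prone variant, left uncalled */
	return 0;
}

Because blocking was never a safe choice here, the flag could only ever
mean "trylock"; a parameter with one permissible value carries no
information, which is the case for deleting it.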
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_ttm.c')
 drivers/gpu/drm/radeon/radeon_ttm.c | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 563c8edcb03b..1d8ff2f850ba 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
 }
 
 static int radeon_move_blit(struct ttm_buffer_object *bo,
-			bool evict, int no_wait_reserve, bool no_wait_gpu,
+			bool evict, bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem,
 			struct ttm_mem_reg *old_mem)
 {
@@ -266,14 +266,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 			&fence);
 	/* FIXME: handle copy error */
 	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
-				      evict, no_wait_reserve, no_wait_gpu, new_mem);
+				      evict, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
 }
 
 static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 				bool evict, bool interruptible,
-				bool no_wait_reserve, bool no_wait_gpu,
+				bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
-			     interruptible, no_wait_reserve, no_wait_gpu);
+			     interruptible, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
 out_cleanup:
 	ttm_bo_mem_put(bo, &tmp_mem);
 	return r;
@@ -320,7 +320,7 @@ out_cleanup:
 
 static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 				bool evict, bool interruptible,
-				bool no_wait_reserve, bool no_wait_gpu,
+				bool no_wait_gpu,
 				struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -340,15 +340,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
-	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
+			     interruptible, no_wait_gpu);
 	if (unlikely(r)) {
 		return r;
 	}
-	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
-	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -359,7 +360,7 @@ out_cleanup:
 
 static int radeon_bo_move(struct ttm_buffer_object *bo,
 			bool evict, bool interruptible,
-			bool no_wait_reserve, bool no_wait_gpu,
+			bool no_wait_gpu,
 			struct ttm_mem_reg *new_mem)
 {
 	struct radeon_device *rdev;
@@ -388,18 +389,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 	if (old_mem->mem_type == TTM_PL_VRAM &&
 	    new_mem->mem_type == TTM_PL_SYSTEM) {
 		r = radeon_move_vram_ram(bo, evict, interruptible,
-					no_wait_reserve, no_wait_gpu, new_mem);
+					no_wait_gpu, new_mem);
 	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
 		   new_mem->mem_type == TTM_PL_VRAM) {
 		r = radeon_move_ram_vram(bo, evict, interruptible,
-					no_wait_reserve, no_wait_gpu, new_mem);
+					no_wait_gpu, new_mem);
 	} else {
-		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
+		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
 	}
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 	}
 	return r;
 }
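
[Editor's note] To see why the mechanical churn spans every hunk above,
here is a self-contained sketch of the same call-chain shape. The names
are hypothetical and plain bools replace the TTM types; it mirrors
radeon_bo_move -> radeon_move_vram_ram -> radeon_move_blit after the
patch. A flag that is threaded through every level but acted on by none
must be deleted from every prototype and every call site at once, which
is exactly what this diff does:

#include <stdbool.h>
#include <stdio.h>

/* Bottom of the chain: mirrors radeon_move_blit after the patch,
 * carrying only the surviving flags. */
static int move_blit(bool evict, bool no_wait_gpu)
{
	printf("blit: evict=%d no_wait_gpu=%d\n", evict, no_wait_gpu);
	return 0;
}

/* Middle level: mirrors radeon_move_vram_ram. Before the patch it
 * also had to accept and forward no_wait_reserve without ever
 * acting on it. */
static int move_vram_ram(bool evict, bool interruptible, bool no_wait_gpu)
{
	(void)interruptible;	/* consumed elsewhere in the real code */
	return move_blit(evict, no_wait_gpu);
}

/* Top level: mirrors radeon_bo_move's dispatch. */
static int bo_move(bool evict, bool interruptible, bool no_wait_gpu)
{
	return move_vram_ram(evict, interruptible, no_wait_gpu);
}

int main(void)
{
	return bo_move(true, true, false);
}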