author     Maarten Lankhorst <maarten.lankhorst@canonical.com>	2014-04-02 11:14:48 -0400
committer  Maarten Lankhorst <maarten.lankhorst@canonical.com>	2014-09-02 10:41:50 -0400
commit     f2c24b83ae90292d315aa7ac029c6ce7929e01aa (patch)
tree       4ef7d29d97cee6231becd7565056d630770d0845
parent     2f453ed4038526172292fb3250b638b3782c7f2b (diff)
drm/ttm: flip the switch, and convert to dma_fence
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
25 files changed, 197 insertions(+), 386 deletions(-)
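
The pattern this commit applies across nouveau, qxl, radeon and vmwgfx, as a hedged sketch (illustrative helper names, not code from the patch; the two halves target the tree before and after this commit respectively, and both assume the BO is reserved):

#include <linux/fence.h>
#include <linux/reservation.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Before: one driver-private object per BO, managed through the
 * driver's sync_obj_* callbacks (all removed by this patch). */
static void bo_attach_fence_old(struct ttm_bo_driver *driver,
				struct ttm_buffer_object *bo, void *fence)
{
	void *old = bo->sync_obj;

	bo->sync_obj = driver->sync_obj_ref(fence);
	if (old)
		driver->sync_obj_unref(&old);
}

/* After: the fence lives in the BO's reservation object, shared with
 * the rest of the dma-buf world; TTM waits on it via the fence API. */
static void bo_attach_fence_new(struct ttm_buffer_object *bo,
				struct fence *fence)
{
	reservation_object_add_excl_fence(bo->resv, fence);
}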
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2d026c81ca1b..6cf7db070faf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -88,13 +88,13 @@ nv10_bo_get_tile_region(struct drm_device *dev, int i)
 
 static void
 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
-			struct nouveau_fence *fence)
+			struct fence *fence)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 
 	if (tile) {
 		spin_lock(&drm->tile.lock);
-		tile->fence = nouveau_fence_ref(fence);
+		tile->fence = nouveau_fence_ref((struct nouveau_fence *)fence);
 		tile->used = false;
 		spin_unlock(&drm->tile.lock);
 	}
@@ -976,7 +976,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 	if (ret == 0) {
 		ret = nouveau_fence_new(chan, false, &fence);
 		if (ret == 0) {
-			ret = ttm_bo_move_accel_cleanup(bo, fence,
+			ret = ttm_bo_move_accel_cleanup(bo,
+							&fence->base,
 							evict,
 							no_wait_gpu,
 							new_mem);
@@ -1167,8 +1168,9 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
+	struct fence *fence = reservation_object_get_excl(bo->resv);
 
-	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
+	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
 }
 
@@ -1455,47 +1457,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	ttm_pool_unpopulate(ttm);
 }
 
-static void
-nouveau_bo_fence_unref(void **sync_obj)
-{
-	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
-}
-
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 {
 	struct reservation_object *resv = nvbo->bo.resv;
 
-	nouveau_bo_fence_unref(&nvbo->bo.sync_obj);
-	nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-
 	reservation_object_add_excl_fence(resv, &fence->base);
 }
 
-static void *
-nouveau_bo_fence_ref(void *sync_obj)
-{
-	return nouveau_fence_ref(sync_obj);
-}
-
-static bool
-nouveau_bo_fence_signalled(void *sync_obj)
-{
-	return nouveau_fence_done(sync_obj);
-}
-
-static int
-nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
-{
-	return nouveau_fence_wait(sync_obj, lazy, intr);
-}
-
-static int
-nouveau_bo_fence_flush(void *sync_obj)
-{
-	return 0;
-}
-
 struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
@@ -1506,11 +1475,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.move_notify = nouveau_bo_move_ntfy,
 	.move = nouveau_bo_move,
 	.verify_access = nouveau_bo_verify_access,
-	.sync_obj_signaled = nouveau_bo_fence_signalled,
-	.sync_obj_wait = nouveau_bo_fence_wait,
-	.sync_obj_flush = nouveau_bo_fence_flush,
-	.sync_obj_unref = nouveau_bo_fence_unref,
-	.sync_obj_ref = nouveau_bo_fence_ref,
 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 3beb3bf130e2..5e7fa68bc438 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -185,17 +185,18 @@ static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
 }
 
 void
-nouveau_fence_work(struct nouveau_fence *fence,
+nouveau_fence_work(struct fence *fence,
 		   void (*func)(void *), void *data)
 {
 	struct nouveau_fence_work *work;
 
-	if (fence_is_signaled(&fence->base))
+	if (fence_is_signaled(fence))
 		goto err;
 
 	work = kmalloc(sizeof(*work), GFP_KERNEL);
 	if (!work) {
-		WARN_ON(nouveau_fence_wait(fence, false, false));
+		WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
+					   false, false));
 		goto err;
 	}
 
@@ -203,7 +204,7 @@ nouveau_fence_work(struct nouveau_fence *fence,
 	work->func = func;
 	work->data = data;
 
-	if (fence_add_callback(&fence->base, &work->cb, nouveau_fence_work_cb) < 0)
+	if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
 		goto err_free;
 	return;
 
@@ -349,14 +350,9 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
 	struct reservation_object_list *fobj;
 	int ret = 0, i;
 
-	fence = nvbo->bo.sync_obj;
-	if (fence && fence_is_signaled(fence)) {
-		nouveau_fence_unref((struct nouveau_fence **)
-				    &nvbo->bo.sync_obj);
-		fence = NULL;
-	}
+	fence = reservation_object_get_excl(resv);
 
-	if (fence) {
+	if (fence && !fence_is_signaled(fence)) {
 		struct nouveau_fence *f = from_fence(fence);
 		struct nouveau_channel *prev = f->channel;
 
@@ -370,12 +366,8 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
 	if (ret)
 		return ret;
 
-	fence = reservation_object_get_excl(resv);
-	if (fence && !nouveau_local_fence(fence, chan->drm))
-		ret = fence_wait(fence, true);
-
 	fobj = reservation_object_get_list(resv);
-	if (!fobj || ret)
+	if (!fobj)
 		return ret;
 
 	for (i = 0; i < fobj->shared_count && !ret; ++i) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 44efd8c7426c..0282e88274ff 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -26,7 +26,7 @@ void nouveau_fence_unref(struct nouveau_fence **);
 
 int  nouveau_fence_emit(struct nouveau_fence *, struct nouveau_channel *);
 bool nouveau_fence_done(struct nouveau_fence *);
-void nouveau_fence_work(struct nouveau_fence *, void (*)(void *), void *);
+void nouveau_fence_work(struct fence *, void (*)(void *), void *);
 int  nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr);
 int  nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index d68c9656e409..a28b5102c4a5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -98,13 +98,12 @@ static void
 nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 {
 	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
-	struct nouveau_fence *fence = NULL;
+	struct fence *fence = NULL;
 
 	list_del(&vma->head);
 
-	if (mapped) {
-		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
-	}
+	if (mapped)
+		fence = reservation_object_get_excl(nvbo->bo.resv);
 
 	if (fence) {
 		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
@@ -114,7 +113,6 @@ nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
 		nouveau_vm_put(vma);
 		kfree(vma);
 	}
-	nouveau_fence_unref(&fence);
 }
 
 void
@@ -874,8 +872,12 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	ret = ttm_bo_reserve(&nvbo->bo, true, false, false, NULL);
 	if (!ret) {
 		ret = ttm_bo_wait(&nvbo->bo, true, true, true);
-		if (!no_wait && ret)
-			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+		if (!no_wait && ret) {
+			struct fence *excl;
+
+			excl = reservation_object_get_excl(nvbo->bo.resv);
+			fence = nouveau_fence_ref((struct nouveau_fence *)excl);
+		}
 
 		ttm_bo_unreserve(&nvbo->bo);
 	}
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 0d144e0646d6..a4a63fd84803 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -67,9 +67,9 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 		rel = fobj ? fobj->shared_count : 0;
 		rcu_read_unlock();
 
-		seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
-			   (unsigned long)bo->gem_base.size, bo->pin_count,
-			   bo->tbo.sync_obj, rel);
+		seq_printf(m, "size %ld, pc %d, num releases %d\n",
+			   (unsigned long)bo->gem_base.size,
+			   bo->pin_count, rel);
 	}
 	spin_unlock(&qdev->release_lock);
 	return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 116eeae843b4..a8be87632cae 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -280,9 +280,7 @@ struct qxl_device {
 	uint8_t		slot_gen_bits;
 	uint64_t	va_slot_mask;
 
-	/* XXX: when rcu becomes available, release_lock can be killed */
 	spinlock_t	release_lock;
-	spinlock_t	fence_lock;
 	struct idr	release_idr;
 	uint32_t	release_seqno;
 	spinlock_t	release_idr_lock;
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index a9e7c30e92c5..7234561e09d9 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -224,7 +224,6 @@ static int qxl_device_init(struct qxl_device *qdev,
 	idr_init(&qdev->release_idr);
 	spin_lock_init(&qdev->release_idr_lock);
 	spin_lock_init(&qdev->release_lock);
-	spin_lock_init(&qdev->fence_lock);
 
 	idr_init(&qdev->surf_id_idr);
 	spin_lock_init(&qdev->surf_id_idr_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 1edaf5768086..37af1bc0dd00 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -78,8 +78,8 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
 	}
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
-	if (bo->tbo.sync_obj)
-		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+
+	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 9731d2540a40..15158c5a5b3a 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -464,9 +464,6 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		bo = entry->bo;
 		qbo = to_qxl_bo(bo);
 
-		if (!entry->bo->sync_obj)
-			entry->bo->sync_obj = qbo;
-
 		reservation_object_add_shared_fence(bo->resv, &release->base);
 		ttm_bo_add_to_lru(bo);
 		__ttm_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 29e0a758ee68..abe945a04fd4 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -357,105 +357,6 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
 	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
 }
 
-static bool qxl_sync_obj_signaled(void *sync_obj);
-
-static int qxl_sync_obj_wait(void *sync_obj,
-			     bool lazy, bool interruptible)
-{
-	struct qxl_bo *bo = (struct qxl_bo *)sync_obj;
-	struct qxl_device *qdev = bo->gem_base.dev->dev_private;
-	struct reservation_object_list *fobj;
-	int count = 0, sc = 0, num_release = 0;
-	bool have_drawable_releases;
-
-retry:
-	if (sc == 0) {
-		if (bo->type == QXL_GEM_DOMAIN_SURFACE)
-			qxl_update_surface(qdev, bo);
-	} else if (sc >= 1) {
-		qxl_io_notify_oom(qdev);
-	}
-
-	sc++;
-
-	for (count = 0; count < 10; count++) {
-		if (qxl_sync_obj_signaled(sync_obj))
-			return 0;
-
-		if (!qxl_queue_garbage_collect(qdev, true))
-			break;
-	}
-
-	have_drawable_releases = false;
-	num_release = 0;
-
-	spin_lock(&qdev->release_lock);
-	fobj = bo->tbo.resv->fence;
-	for (count = 0; fobj && count < fobj->shared_count; count++) {
-		struct qxl_release *release;
-
-		release = container_of(fobj->shared[count],
-				       struct qxl_release, base);
-
-		if (fence_is_signaled(&release->base))
-			continue;
-
-		num_release++;
-
-		if (release->type == QXL_RELEASE_DRAWABLE)
-			have_drawable_releases = true;
-	}
-	spin_unlock(&qdev->release_lock);
-
-	qxl_queue_garbage_collect(qdev, true);
-
-	if (have_drawable_releases || sc < 4) {
-		if (sc > 2)
-			/* back off */
-			usleep_range(500, 1000);
-		if (have_drawable_releases && sc > 300) {
-			WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, num_release);
-			return -EBUSY;
-		}
-		goto retry;
-	}
-	return 0;
-}
-
-static int qxl_sync_obj_flush(void *sync_obj)
-{
-	return 0;
-}
-
-static void qxl_sync_obj_unref(void **sync_obj)
-{
-	*sync_obj = NULL;
-}
-
-static void *qxl_sync_obj_ref(void *sync_obj)
-{
-	return sync_obj;
-}
-
-static bool qxl_sync_obj_signaled(void *sync_obj)
-{
-	struct qxl_bo *qbo = (struct qxl_bo *)sync_obj;
-	struct qxl_device *qdev = qbo->gem_base.dev->dev_private;
-	struct reservation_object_list *fobj;
-	bool ret = true;
-	unsigned i;
-
-	spin_lock(&qdev->release_lock);
-	fobj = qbo->tbo.resv->fence;
-	for (i = 0; fobj && i < fobj->shared_count; ++i) {
-		ret = fence_is_signaled(fobj->shared[i]);
-		if (!ret)
-			break;
-	}
-	spin_unlock(&qdev->release_lock);
-	return ret;
-}
-
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
 			       struct ttm_mem_reg *new_mem)
 {
@@ -482,11 +383,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
 	.verify_access = &qxl_verify_access,
 	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
 	.io_mem_free = &qxl_ttm_io_mem_free,
-	.sync_obj_signaled = &qxl_sync_obj_signaled,
-	.sync_obj_wait = &qxl_sync_obj_wait,
-	.sync_obj_flush = &qxl_sync_obj_flush,
-	.sync_obj_unref = &qxl_sync_obj_unref,
-	.sync_obj_ref = &qxl_sync_obj_ref,
 	.move_notify = &qxl_bo_move_notify,
 };
 
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index bd328cb6fa61..6e3d1c8f3483 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -253,11 +253,17 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
 	int i;
 
 	for (i = 0; i < p->nrelocs; i++) {
+		struct reservation_object *resv;
+		struct fence *fence;
+
 		if (!p->relocs[i].robj)
 			continue;
 
+		resv = p->relocs[i].robj->tbo.resv;
+		fence = reservation_object_get_excl(resv);
+
 		radeon_semaphore_sync_to(p->ib.semaphore,
-					 p->relocs[i].robj->tbo.sync_obj);
+					 (struct radeon_fence *)fence);
 	}
 }
 
@@ -427,7 +433,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 
 		ttm_eu_fence_buffer_objects(&parser->ticket,
 					    &parser->validated,
-					    parser->ib.fence);
+					    &parser->ib.fence->base);
 	} else if (backoff) {
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7d0a7abdab2a..bc894c17b2f9 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -494,7 +494,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
 		goto cleanup;
 	}
-	work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+	work->fence = (struct radeon_fence *)fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
 	radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
 	radeon_bo_unreserve(new_rbo);
 
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 0157bc2f11f8..a69bd441dd2d 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -122,6 +122,7 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct radeon_bo *bo;
+		struct fence *fence;
 		int r;
 
 		bo = container_of(it, struct radeon_bo, mn_it);
@@ -133,8 +134,9 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 			continue;
 		}
 
-		if (bo->tbo.sync_obj) {
-			r = radeon_fence_wait(bo->tbo.sync_obj, false);
+		fence = reservation_object_get_excl(bo->tbo.resv);
+		if (fence) {
+			r = radeon_fence_wait((struct radeon_fence *)fence, false);
 			if (r)
 				DRM_ERROR("(%d) failed to wait for user bo\n", r);
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 378fe9ea4d44..aadbd36e64b9 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -781,8 +781,8 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
 		return r;
 	if (mem_type)
 		*mem_type = bo->tbo.mem.mem_type;
-	if (bo->tbo.sync_obj)
-		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+
+	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
 	ttm_bo_unreserve(&bo->tbo);
 	return r;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 822eb3630045..62d1f4d730a2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -270,12 +270,12 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
 	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
 
 	/* sync other rings */
-	fence = bo->sync_obj;
+	fence = (struct radeon_fence *)reservation_object_get_excl(bo->resv);
 	r = radeon_copy(rdev, old_start, new_start,
 			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
 			&fence);
 	/* FIXME: handle copy error */
-	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
+	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
 				      evict, no_wait_gpu, new_mem);
 	radeon_fence_unref(&fence);
 	return r;
@@ -488,31 +488,6 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
 {
 }
 
-static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
-{
-	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
-}
-
-static int radeon_sync_obj_flush(void *sync_obj)
-{
-	return 0;
-}
-
-static void radeon_sync_obj_unref(void **sync_obj)
-{
-	radeon_fence_unref((struct radeon_fence **)sync_obj);
-}
-
-static void *radeon_sync_obj_ref(void *sync_obj)
-{
-	return radeon_fence_ref((struct radeon_fence *)sync_obj);
-}
-
-static bool radeon_sync_obj_signaled(void *sync_obj)
-{
-	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
-}
-
 /*
  * TTM backend functions.
  */
@@ -847,11 +822,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
 	.evict_flags = &radeon_evict_flags,
 	.move = &radeon_bo_move,
 	.verify_access = &radeon_verify_access,
-	.sync_obj_signaled = &radeon_sync_obj_signaled,
-	.sync_obj_wait = &radeon_sync_obj_wait,
-	.sync_obj_flush = &radeon_sync_obj_flush,
-	.sync_obj_unref = &radeon_sync_obj_unref,
-	.sync_obj_ref = &radeon_sync_obj_ref,
 	.move_notify = &radeon_bo_move_notify,
 	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
 	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 5729e9bebd9d..ba4f38916026 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -400,6 +400,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 {
 	int32_t *msg, msg_type, handle;
 	unsigned img_size = 0;
+	struct fence *f;
 	void *ptr;
 
 	int i, r;
@@ -409,8 +410,9 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		return -EINVAL;
 	}
 
-	if (bo->tbo.sync_obj) {
-		r = radeon_fence_wait(bo->tbo.sync_obj, false);
+	f = reservation_object_get_excl(bo->tbo.resv);
+	if (f) {
+		r = radeon_fence_wait((struct radeon_fence *)f, false);
 		if (r) {
 			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
 			return r;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 3d9a6a036f8a..671ee566aa51 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -424,7 +424,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
 	if (r)
 		goto error;
 
-	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
+	ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);
 	radeon_ib_free(rdev, &ib);
 
 	return 0;
@@ -693,8 +693,14 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
 				incr, R600_PTE_VALID);
 
 	if (ib.length_dw != 0) {
+		struct fence *fence;
+
 		radeon_asic_vm_pad_ib(rdev, &ib);
-		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
+
+		fence = reservation_object_get_excl(pd->tbo.resv);
+		radeon_semaphore_sync_to(ib.semaphore,
+					 (struct radeon_fence *)fence);
+
 		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
 		WARN_ON(ib.length_dw > ndw);
 		r = radeon_ib_schedule(rdev, &ib, NULL, false);
@@ -820,8 +826,11 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
 		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
 		unsigned nptes;
 		uint64_t pte;
+		struct fence *fence;
 
-		radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
+		fence = reservation_object_get_excl(pt->tbo.resv);
+		radeon_semaphore_sync_to(ib->semaphore,
+					 (struct radeon_fence *)fence);
 
 		if ((addr & ~mask) == (end & ~mask))
 			nptes = end - addr;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 195386f16ca4..66707be386f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -40,6 +40,7 @@
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/atomic.h>
+#include <linux/reservation.h>
 
 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
@@ -142,7 +143,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	BUG_ON(atomic_read(&bo->list_kref.refcount));
 	BUG_ON(atomic_read(&bo->kref.refcount));
 	BUG_ON(atomic_read(&bo->cpu_writers));
-	BUG_ON(bo->sync_obj != NULL);
 	BUG_ON(bo->mem.mm_node != NULL);
 	BUG_ON(!list_empty(&bo->lru));
 	BUG_ON(!list_empty(&bo->ddestroy));
@@ -403,12 +403,30 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 	ww_mutex_unlock (&bo->resv->lock);
 }
 
+static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
+{
+	struct reservation_object_list *fobj;
+	struct fence *fence;
+	int i;
+
+	fobj = reservation_object_get_list(bo->resv);
+	fence = reservation_object_get_excl(bo->resv);
+	if (fence && !fence->ops->signaled)
+		fence_enable_sw_signaling(fence);
+
+	for (i = 0; fobj && i < fobj->shared_count; ++i) {
+		fence = rcu_dereference_protected(fobj->shared[i],
+					reservation_object_held(bo->resv));
+
+		if (!fence->ops->signaled)
+			fence_enable_sw_signaling(fence);
+	}
+}
+
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
-	struct ttm_bo_driver *driver = bdev->driver;
-	void *sync_obj = NULL;
 	int put_count;
 	int ret;
 
@@ -416,9 +434,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 
 	if (!ret) {
-		(void) ttm_bo_wait(bo, false, false, true);
-
-		if (!bo->sync_obj) {
+		if (!ttm_bo_wait(bo, false, false, true)) {
 			put_count = ttm_bo_del_from_lru(bo);
 
 			spin_unlock(&glob->lru_lock);
@@ -427,8 +443,8 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 			ttm_bo_list_ref_sub(bo, put_count, true);
 
 			return;
-		}
-		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+		} else
+			ttm_bo_flush_all_fences(bo);
 
 		/*
 		 * Make NO_EVICT bos immediately available to
@@ -447,14 +463,70 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
 
-	if (sync_obj) {
-		driver->sync_obj_flush(sync_obj);
-		driver->sync_obj_unref(&sync_obj);
-	}
 	schedule_delayed_work(&bdev->wq,
 			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
 
+static int ttm_bo_unreserve_and_wait(struct ttm_buffer_object *bo,
+				     bool interruptible)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	struct reservation_object_list *fobj;
+	struct fence *excl = NULL;
+	struct fence **shared = NULL;
+	u32 shared_count = 0, i;
+	int ret = 0;
+
+	fobj = reservation_object_get_list(bo->resv);
+	if (fobj && fobj->shared_count) {
+		shared = kmalloc(sizeof(*shared) * fobj->shared_count,
+				 GFP_KERNEL);
+
+		if (!shared) {
+			ret = -ENOMEM;
+			__ttm_bo_unreserve(bo);
+			spin_unlock(&glob->lru_lock);
+			return ret;
+		}
+
+		for (i = 0; i < fobj->shared_count; ++i) {
+			if (!fence_is_signaled(fobj->shared[i])) {
+				fence_get(fobj->shared[i]);
+				shared[shared_count++] = fobj->shared[i];
+			}
+		}
+		if (!shared_count) {
+			kfree(shared);
+			shared = NULL;
+		}
+	}
+
+	excl = reservation_object_get_excl(bo->resv);
+	if (excl && !fence_is_signaled(excl))
+		fence_get(excl);
+	else
+		excl = NULL;
+
+	__ttm_bo_unreserve(bo);
+	spin_unlock(&glob->lru_lock);
+
+	if (excl) {
+		ret = fence_wait(excl, interruptible);
+		fence_put(excl);
+	}
+
+	if (shared_count > 0) {
+		for (i = 0; i < shared_count; ++i) {
+			if (!ret)
+				ret = fence_wait(shared[i], interruptible);
+			fence_put(shared[i]);
+		}
+		kfree(shared);
+	}
+
+	return ret;
+}
+
 /**
  * function ttm_bo_cleanup_refs_and_unlock
  * If bo idle, remove from delayed- and lru lists, and unref.
@@ -471,8 +543,6 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 					  bool interruptible,
 					  bool no_wait_gpu)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_driver *driver = bdev->driver;
 	struct ttm_bo_global *glob = bo->glob;
 	int put_count;
 	int ret;
@@ -480,20 +550,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	ret = ttm_bo_wait(bo, false, false, true);
 
 	if (ret && !no_wait_gpu) {
-		void *sync_obj;
-
-		/*
-		 * Take a reference to the fence and unreserve,
-		 * at this point the buffer should be dead, so
-		 * no new sync objects can be attached.
-		 */
-		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-
-		__ttm_bo_unreserve(bo);
-		spin_unlock(&glob->lru_lock);
-
-		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
-		driver->sync_obj_unref(&sync_obj);
+		ret = ttm_bo_unreserve_and_wait(bo, interruptible);
 		if (ret)
 			return ret;
 
@@ -1498,41 +1555,51 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-
 int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
 {
-	struct ttm_bo_driver *driver = bo->bdev->driver;
-	void *sync_obj;
-	int ret = 0;
-
-	lockdep_assert_held(&bo->resv->lock.base);
+	struct reservation_object_list *fobj;
+	struct reservation_object *resv;
+	struct fence *excl;
+	long timeout = 15 * HZ;
+	int i;
 
-	if (likely(bo->sync_obj == NULL))
-		return 0;
+	resv = bo->resv;
+	fobj = reservation_object_get_list(resv);
+	excl = reservation_object_get_excl(resv);
+	if (excl) {
+		if (!fence_is_signaled(excl)) {
+			if (no_wait)
+				return -EBUSY;
 
-	if (bo->sync_obj) {
-		if (driver->sync_obj_signaled(bo->sync_obj)) {
-			driver->sync_obj_unref(&bo->sync_obj);
-			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-			return 0;
+			timeout = fence_wait_timeout(excl,
+						     interruptible, timeout);
 		}
+	}
 
-		if (no_wait)
-			return -EBUSY;
+	for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
+		struct fence *fence;
+		fence = rcu_dereference_protected(fobj->shared[i],
+						reservation_object_held(resv));
 
-		sync_obj = driver->sync_obj_ref(bo->sync_obj);
-		ret = driver->sync_obj_wait(sync_obj,
-					    lazy, interruptible);
+		if (!fence_is_signaled(fence)) {
+			if (no_wait)
+				return -EBUSY;
 
-		if (likely(ret == 0)) {
-			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
-				  &bo->priv_flags);
-			driver->sync_obj_unref(&bo->sync_obj);
+			timeout = fence_wait_timeout(fence,
+						     interruptible, timeout);
 		}
-		driver->sync_obj_unref(&sync_obj);
 	}
-	return ret;
+
+	if (timeout < 0)
+		return timeout;
+
+	if (timeout == 0)
+		return -EBUSY;
+
+	reservation_object_add_excl_fence(resv, NULL);
+	clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
+	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_wait);
 
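
With bo->sync_obj gone, idle checks go through the reservation object, as the rewritten ttm_bo_wait() above shows (argument order: lazy, interruptible, no_wait). A minimal caller sketch, assuming the BO reservation is held:

/* Non-blocking busy probe, then a blocking wait. The rewritten
 * ttm_bo_wait() returns -EBUSY from the probe when the exclusive or
 * any shared fence on bo->resv is still unsignaled. */
static int bo_wait_idle(struct ttm_buffer_object *bo, bool interruptible)
{
	int ret;

	ret = ttm_bo_wait(bo, false, interruptible, true);	/* no_wait probe */
	if (ret != -EBUSY)
		return ret;

	return ttm_bo_wait(bo, false, interruptible, false);	/* block */
}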
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 495aebf0f9c3..824af90cbe31 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -37,6 +37,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
+#include <linux/reservation.h>
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
@@ -444,8 +445,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 				      struct ttm_buffer_object **new_obj)
 {
 	struct ttm_buffer_object *fbo;
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_driver *driver = bdev->driver;
 	int ret;
 
 	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
@@ -466,10 +465,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);
 
-	if (bo->sync_obj)
-		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
-	else
-		fbo->sync_obj = NULL;
 	kref_init(&fbo->list_kref);
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;
@@ -642,28 +637,20 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-			      void *sync_obj,
+			      struct fence *fence,
 			      bool evict,
 			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_driver *driver = bdev->driver;
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
 	struct ttm_buffer_object *ghost_obj;
-	void *tmp_obj = NULL;
 
-	if (bo->sync_obj) {
-		tmp_obj = bo->sync_obj;
-		bo->sync_obj = NULL;
-	}
-	bo->sync_obj = driver->sync_obj_ref(sync_obj);
+	reservation_object_add_excl_fence(bo->resv, fence);
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
-		if (tmp_obj)
-			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
 			return ret;
 
@@ -684,13 +671,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	 */
 
 	set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-	if (tmp_obj)
-		driver->sync_obj_unref(&tmp_obj);
 
 	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
 	if (ret)
 		return ret;
 
+	reservation_object_add_excl_fence(ghost_obj->resv, fence);
+
 	/**
 	 * If we're not moving to fixed memory, the TTM object
 	 * needs to stay alive. Otherwhise hang it on the ghost
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 108730e9147b..adafc0f8ec06 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -163,7 +163,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
 void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
-				 struct list_head *list, void *sync_obj)
+				 struct list_head *list, struct fence *fence)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_buffer_object *bo;
@@ -183,18 +183,12 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
-		entry->old_sync_obj = bo->sync_obj;
-		bo->sync_obj = driver->sync_obj_ref(sync_obj);
+		reservation_object_add_excl_fence(bo->resv, fence);
 		ttm_bo_add_to_lru(bo);
 		__ttm_bo_unreserve(bo);
 	}
 	spin_unlock(&glob->lru_lock);
 	if (ticket)
 		ww_acquire_fini(ticket);
-
-	list_for_each_entry(entry, list, head) {
-		if (entry->old_sync_obj)
-			driver->sync_obj_unref(&entry->old_sync_obj);
-	}
 }
 EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 4a449b2528a2..cff2bf9db9d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -802,41 +802,6 @@ static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 }
 
 /**
- * FIXME: We're using the old vmware polling method to sync.
- * Do this with fences instead.
- */
-
-static void *vmw_sync_obj_ref(void *sync_obj)
-{
-
-	return (void *)
-		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
-}
-
-static void vmw_sync_obj_unref(void **sync_obj)
-{
-	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
-}
-
-static int vmw_sync_obj_flush(void *sync_obj)
-{
-	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
-	return 0;
-}
-
-static bool vmw_sync_obj_signaled(void *sync_obj)
-{
-	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj);
-}
-
-static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
-{
-	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
-				  lazy, interruptible,
-				  VMW_FENCE_WAIT_TIMEOUT);
-}
-
-/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
@@ -873,11 +838,6 @@ struct ttm_bo_driver vmw_bo_driver = {
 	.evict_flags = vmw_evict_flags,
 	.move = NULL,
 	.verify_access = vmw_verify_access,
-	.sync_obj_signaled = vmw_sync_obj_signaled,
-	.sync_obj_wait = vmw_sync_obj_wait,
-	.sync_obj_flush = vmw_sync_obj_flush,
-	.sync_obj_unref = vmw_sync_obj_unref,
-	.sync_obj_ref = vmw_sync_obj_ref,
 	.move_notify = vmw_move_notify,
 	.swap_notify = vmw_swap_notify,
 	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 67aebdb13b8c..98d5afd9a9df 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1420,22 +1420,16 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 			 struct vmw_fence_obj *fence)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct vmw_fence_obj *old_fence_obj;
+
 	struct vmw_private *dev_priv =
 		container_of(bdev, struct vmw_private, bdev);
 
 	if (fence == NULL) {
 		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+		reservation_object_add_excl_fence(bo->resv, &fence->base);
+		fence_put(&fence->base);
 	} else
-		vmw_fence_obj_reference(fence);
-
-	reservation_object_add_excl_fence(bo->resv, &fence->base);
-
-	old_fence_obj = bo->sync_obj;
-	bo->sync_obj = fence;
-
-	if (old_fence_obj)
-		vmw_fence_obj_unreference(&old_fence_obj);
+		reservation_object_add_excl_fence(bo->resv, &fence->base);
 }
 
 /**
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 5805f4a49478..70b44917c368 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -173,7 +173,6 @@ struct ttm_tt;
  * @lru: List head for the lru list.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
- * @sync_obj: Pointer to a synchronization object.
  * @priv_flags: Flags describing buffer object internal state.
  * @vma_node: Address space manager node.
  * @offset: The current GPU offset, which can have different meanings
@@ -240,7 +239,6 @@ struct ttm_buffer_object {
 	 * Members protected by a bo reservation.
 	 */
 
-	void *sync_obj;
 	unsigned long priv_flags;
 
 	struct drm_vma_offset_node vma_node;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e1ee141e26cc..142d752fc450 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -312,11 +312,6 @@ struct ttm_mem_type_manager {
  * @move: Callback for a driver to hook in accelerated functions to
  * move a buffer.
  * If set to NULL, a potentially slow memcpy() move is used.
- * @sync_obj_signaled: See ttm_fence_api.h
- * @sync_obj_wait: See ttm_fence_api.h
- * @sync_obj_flush: See ttm_fence_api.h
- * @sync_obj_unref: See ttm_fence_api.h
- * @sync_obj_ref: See ttm_fence_api.h
  */
 
 struct ttm_bo_driver {
@@ -418,23 +413,6 @@ struct ttm_bo_driver {
 	int (*verify_access) (struct ttm_buffer_object *bo,
 			      struct file *filp);
 
-	/**
-	 * In case a driver writer dislikes the TTM fence objects,
-	 * the driver writer can replace those with sync objects of
-	 * his / her own. If it turns out that no driver writer is
-	 * using these. I suggest we remove these hooks and plug in
-	 * fences directly. The bo driver needs the following functionality:
-	 * See the corresponding functions in the fence object API
-	 * documentation.
-	 */
-
-	bool (*sync_obj_signaled) (void *sync_obj);
-	int (*sync_obj_wait) (void *sync_obj,
-			      bool lazy, bool interruptible);
-	int (*sync_obj_flush) (void *sync_obj);
-	void (*sync_obj_unref) (void **sync_obj);
-	void *(*sync_obj_ref) (void *sync_obj);
-
 	/* hook to notify driver about a driver move so it
 	 * can do tiling things */
 	void (*move_notify)(struct ttm_buffer_object *bo,
@@ -1022,7 +1000,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
- * @sync_obj: A sync object that signals when moving is complete.
+ * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
@@ -1036,7 +1014,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 */
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-				     void *sync_obj,
+				     struct fence *fence,
 				     bool evict, bool no_wait_gpu,
 				     struct ttm_mem_reg *new_mem);
/**
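
The kerneldoc above now documents a fence-typed parameter. A hedged sketch of a driver's accelerated move under the new ttm_bo_move_accel_cleanup() signature; driver_copy() is a hypothetical stand-in for the driver's own blit path (compare the radeon_move_blit() hunk earlier in this diff):

/* driver_copy() is hypothetical: it emits the GPU copy and returns a
 * reference to the fence that signals its completion. */
static int driver_move_blit(struct ttm_buffer_object *bo, bool evict,
			    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct fence *fence = NULL;
	int r;

	r = driver_copy(bo, new_mem, &fence);
	if (r)
		return r;

	/* TTM installs the fence as the exclusive fence of the BO (and of
	 * the ghost object, if one is created); it takes its own reference. */
	r = ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu, new_mem);
	fence_put(fence);
	return r;
}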
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 8490cb8ee0d8..ff11a424f752 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -39,16 +39,11 @@
 *
 * @head: list head for thread-private list.
 * @bo: refcounted buffer object pointer.
- * @reserved: Indicates whether @bo has been reserved for validation.
- * @removed: Indicates whether @bo has been removed from lru lists.
- * @put_count: Number of outstanding references on bo::list_kref.
- * @old_sync_obj: Pointer to a sync object about to be unreferenced
 */
 
 struct ttm_validate_buffer {
	struct list_head head;
	struct ttm_buffer_object *bo;
-	void *old_sync_obj;
 };
 
 /**
@@ -100,7 +95,7 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 *
 * @ticket: ww_acquire_ctx from reserve call
 * @list: thread private list of ttm_validate_buffer structs.
- * @sync_obj: The new sync object for the buffers.
+ * @fence: The new exclusive fence for the buffers.
 *
 * This function should be called when command submission is complete, and
 * it will add a new sync object to bos pointed to by entries on @list.
@@ -109,6 +104,7 @@ extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 */
 
 extern void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
-					struct list_head *list, void *sync_obj);
+					struct list_head *list,
+					struct fence *fence);
 
 #endif
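
Taken together with the ttm_execbuf_util.c change above, command submission now ends by installing a single struct fence on every validated BO. A sketch of that flow; driver_submit() is a hypothetical driver hook, and the list is assumed to have already been reserved with ttm_eu_reserve_buffers():

/* driver_submit() is hypothetical: it pushes the command stream and
 * returns a reference to the fence marking its completion. */
static int submit_and_fence(struct ww_acquire_ctx *ticket,
			    struct list_head *validated)
{
	struct fence *fence;
	int ret;

	ret = driver_submit(validated, &fence);
	if (ret) {
		/* Drop the reservations taken by ttm_eu_reserve_buffers(). */
		ttm_eu_backoff_reservation(ticket, validated);
		return ret;
	}

	/* Installs @fence as the exclusive fence on every BO on the list,
	 * re-adds the BOs to the LRU and unreserves them; the reservation
	 * objects take their own references, so ours can be dropped. */
	ttm_eu_fence_buffer_objects(ticket, validated, fence);
	fence_put(fence);
	return 0;
}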