aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMaarten Lankhorst <maarten.lankhorst@canonical.com>2014-04-02 06:40:05 -0400
committerMaarten Lankhorst <maarten.lankhorst@canonical.com>2014-09-02 10:41:50 -0400
commit2f453ed4038526172292fb3250b638b3782c7f2b (patch)
treece8ec40176f467cab73b9764b566a6e80c389953
parent29ba89b2371d466ca68973525816cf10debc2655 (diff)
drm/qxl: rework to new fence interface
Final driver! \o/ This is not a proper dma_fence because the hardware may never signal anything, so don't use dma-buf with qxl, ever. Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
-rw-r--r--drivers/gpu/drm/qxl/Makefile2
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c12
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h22
-rw-r--r--drivers/gpu/drm/qxl/qxl_fence.c87
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c166
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c97
9 files changed, 220 insertions, 175 deletions
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile
index ea046ba691d2..ac0d74852e11 100644
--- a/drivers/gpu/drm/qxl/Makefile
+++ b/drivers/gpu/drm/qxl/Makefile
@@ -4,6 +4,6 @@
4 4
5ccflags-y := -Iinclude/drm 5ccflags-y := -Iinclude/drm
6 6
7qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o 7qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_release.o
8 8
9obj-$(CONFIG_DRM_QXL)+= qxl.o 9obj-$(CONFIG_DRM_QXL)+= qxl.o
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 45fad7b45486..97823644d347 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -620,11 +620,6 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
620 if (ret == -EBUSY) 620 if (ret == -EBUSY)
621 return -EBUSY; 621 return -EBUSY;
622 622
623 if (surf->fence.num_active_releases > 0 && stall == false) {
624 qxl_bo_unreserve(surf);
625 return -EBUSY;
626 }
627
628 if (stall) 623 if (stall)
629 mutex_unlock(&qdev->surf_evict_mutex); 624 mutex_unlock(&qdev->surf_evict_mutex);
630 625
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index c3c2bbdc6674..0d144e0646d6 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -57,11 +57,21 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
57 struct qxl_device *qdev = node->minor->dev->dev_private; 57 struct qxl_device *qdev = node->minor->dev->dev_private;
58 struct qxl_bo *bo; 58 struct qxl_bo *bo;
59 59
60 spin_lock(&qdev->release_lock);
60 list_for_each_entry(bo, &qdev->gem.objects, list) { 61 list_for_each_entry(bo, &qdev->gem.objects, list) {
62 struct reservation_object_list *fobj;
63 int rel;
64
65 rcu_read_lock();
66 fobj = rcu_dereference(bo->tbo.resv->fence);
67 rel = fobj ? fobj->shared_count : 0;
68 rcu_read_unlock();
69
61 seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n", 70 seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
62 (unsigned long)bo->gem_base.size, bo->pin_count, 71 (unsigned long)bo->gem_base.size, bo->pin_count,
63 bo->tbo.sync_obj, bo->fence.num_active_releases); 72 bo->tbo.sync_obj, rel);
64 } 73 }
74 spin_unlock(&qdev->release_lock);
65 return 0; 75 return 0;
66} 76}
67 77
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index f6022b703645..116eeae843b4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -31,6 +31,7 @@
31 * Definitions taken from spice-protocol, plus kernel driver specific bits. 31 * Definitions taken from spice-protocol, plus kernel driver specific bits.
32 */ 32 */
33 33
34#include <linux/fence.h>
34#include <linux/workqueue.h> 35#include <linux/workqueue.h>
35#include <linux/firmware.h> 36#include <linux/firmware.h>
36#include <linux/platform_device.h> 37#include <linux/platform_device.h>
@@ -95,13 +96,6 @@ enum {
95 QXL_INTERRUPT_IO_CMD |\ 96 QXL_INTERRUPT_IO_CMD |\
96 QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) 97 QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
97 98
98struct qxl_fence {
99 struct qxl_device *qdev;
100 uint32_t num_active_releases;
101 uint32_t *release_ids;
102 struct radix_tree_root tree;
103};
104
105struct qxl_bo { 99struct qxl_bo {
106 /* Protected by gem.mutex */ 100 /* Protected by gem.mutex */
107 struct list_head list; 101 struct list_head list;
@@ -113,13 +107,13 @@ struct qxl_bo {
113 unsigned pin_count; 107 unsigned pin_count;
114 void *kptr; 108 void *kptr;
115 int type; 109 int type;
110
116 /* Constant after initialization */ 111 /* Constant after initialization */
117 struct drm_gem_object gem_base; 112 struct drm_gem_object gem_base;
118 bool is_primary; /* is this now a primary surface */ 113 bool is_primary; /* is this now a primary surface */
119 bool hw_surf_alloc; 114 bool hw_surf_alloc;
120 struct qxl_surface surf; 115 struct qxl_surface surf;
121 uint32_t surface_id; 116 uint32_t surface_id;
122 struct qxl_fence fence; /* per bo fence - list of releases */
123 struct qxl_release *surf_create; 117 struct qxl_release *surf_create;
124}; 118};
125#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) 119#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
@@ -191,6 +185,8 @@ enum {
191 * spice-protocol/qxl_dev.h */ 185 * spice-protocol/qxl_dev.h */
192#define QXL_MAX_RES 96 186#define QXL_MAX_RES 96
193struct qxl_release { 187struct qxl_release {
188 struct fence base;
189
194 int id; 190 int id;
195 int type; 191 int type;
196 uint32_t release_offset; 192 uint32_t release_offset;
@@ -284,7 +280,11 @@ struct qxl_device {
284 uint8_t slot_gen_bits; 280 uint8_t slot_gen_bits;
285 uint64_t va_slot_mask; 281 uint64_t va_slot_mask;
286 282
283 /* XXX: when rcu becomes available, release_lock can be killed */
284 spinlock_t release_lock;
285 spinlock_t fence_lock;
287 struct idr release_idr; 286 struct idr release_idr;
287 uint32_t release_seqno;
288 spinlock_t release_idr_lock; 288 spinlock_t release_idr_lock;
289 struct mutex async_io_mutex; 289 struct mutex async_io_mutex;
290 unsigned int last_sent_io_cmd; 290 unsigned int last_sent_io_cmd;
@@ -561,10 +561,4 @@ qxl_surface_lookup(struct drm_device *dev, int surface_id);
561void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing); 561void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing);
562int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); 562int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
563 563
564/* qxl_fence.c */
565void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
566int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
567int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
568void qxl_fence_fini(struct qxl_fence *qfence);
569
570#endif 564#endif
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
deleted file mode 100644
index c7248418117d..000000000000
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ /dev/null
@@ -1,87 +0,0 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 * Alon Levy
24 */
25
26
27#include "qxl_drv.h"
28
29/* QXL fencing-
30
31 When we submit operations to the GPU we pass a release reference to the GPU
32 with them, the release reference is then added to the release ring when
33 the GPU is finished with that particular operation and has removed it from
34 its tree.
35
36 So we can have multiple outstanding non-linear fences per object.
37
38 From a TTM POV we only care if the object has any outstanding releases on
39 it.
40
41 we wait until all outstanding releases are processed.
42
43 sync object is just a list of release ids that represent that fence on
44 that buffer.
45
46 we just add new releases onto the sync object attached to the object.
47
48 This currently uses a radix tree to store the list of release ids.
49
50 For some reason every so often qxl hw fails to release, things go wrong.
51*/
52/* must be called with the fence lock held */
53void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
54{
55 radix_tree_insert(&qfence->tree, rel_id, qfence);
56 qfence->num_active_releases++;
57}
58
59int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
60{
61 void *ret;
62 int retval = 0;
63
64 ret = radix_tree_delete(&qfence->tree, rel_id);
65 if (ret == qfence)
66 qfence->num_active_releases--;
67 else {
68 DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
69 retval = -ENOENT;
70 }
71 return retval;
72}
73
74
75int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
76{
77 qfence->qdev = qdev;
78 qfence->num_active_releases = 0;
79 INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
80 return 0;
81}
82
83void qxl_fence_fini(struct qxl_fence *qfence)
84{
85 kfree(qfence->release_ids);
86 qfence->num_active_releases = 0;
87}
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index fd88eb4a3f79..a9e7c30e92c5 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -223,6 +223,8 @@ static int qxl_device_init(struct qxl_device *qdev,
223 223
224 idr_init(&qdev->release_idr); 224 idr_init(&qdev->release_idr);
225 spin_lock_init(&qdev->release_idr_lock); 225 spin_lock_init(&qdev->release_idr_lock);
226 spin_lock_init(&qdev->release_lock);
227 spin_lock_init(&qdev->fence_lock);
226 228
227 idr_init(&qdev->surf_id_idr); 229 idr_init(&qdev->surf_id_idr);
228 spin_lock_init(&qdev->surf_id_idr_lock); 230 spin_lock_init(&qdev->surf_id_idr_lock);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index adad12d30372..69c104c3240f 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -36,7 +36,6 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
36 qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 36 qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
37 37
38 qxl_surface_evict(qdev, bo, false); 38 qxl_surface_evict(qdev, bo, false);
39 qxl_fence_fini(&bo->fence);
40 mutex_lock(&qdev->gem.mutex); 39 mutex_lock(&qdev->gem.mutex);
41 list_del_init(&bo->list); 40 list_del_init(&bo->list);
42 mutex_unlock(&qdev->gem.mutex); 41 mutex_unlock(&qdev->gem.mutex);
@@ -102,7 +101,6 @@ int qxl_bo_create(struct qxl_device *qdev,
102 bo->type = domain; 101 bo->type = domain;
103 bo->pin_count = pinned ? 1 : 0; 102 bo->pin_count = pinned ? 1 : 0;
104 bo->surface_id = 0; 103 bo->surface_id = 0;
105 qxl_fence_init(qdev, &bo->fence);
106 INIT_LIST_HEAD(&bo->list); 104 INIT_LIST_HEAD(&bo->list);
107 105
108 if (surf) 106 if (surf)
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 4045ba873ab8..9731d2540a40 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -21,6 +21,7 @@
21 */ 21 */
22#include "qxl_drv.h" 22#include "qxl_drv.h"
23#include "qxl_object.h" 23#include "qxl_object.h"
24#include <trace/events/fence.h>
24 25
25/* 26/*
26 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate 27 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
@@ -39,6 +40,88 @@
39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; 40static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; 41static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
41 42
43static const char *qxl_get_driver_name(struct fence *fence)
44{
45 return "qxl";
46}
47
48static const char *qxl_get_timeline_name(struct fence *fence)
49{
50 return "release";
51}
52
53static bool qxl_nop_signaling(struct fence *fence)
54{
55 /* fences are always automatically signaled, so just pretend we did this.. */
56 return true;
57}
58
59static long qxl_fence_wait(struct fence *fence, bool intr, signed long timeout)
60{
61 struct qxl_device *qdev;
62 struct qxl_release *release;
63 int count = 0, sc = 0;
64 bool have_drawable_releases;
65 unsigned long cur, end = jiffies + timeout;
66
67 qdev = container_of(fence->lock, struct qxl_device, release_lock);
68 release = container_of(fence, struct qxl_release, base);
69 have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
70
71retry:
72 sc++;
73
74 if (fence_is_signaled_locked(fence))
75 goto signaled;
76
77 qxl_io_notify_oom(qdev);
78
79 for (count = 0; count < 11; count++) {
80 if (!qxl_queue_garbage_collect(qdev, true))
81 break;
82
83 if (fence_is_signaled_locked(fence))
84 goto signaled;
85 }
86
87 if (fence_is_signaled_locked(fence))
88 goto signaled;
89
90 if (have_drawable_releases || sc < 4) {
91 if (sc > 2)
92 /* back off */
93 usleep_range(500, 1000);
94
95 if (time_after(jiffies, end))
96 return 0;
97
98 if (have_drawable_releases && sc > 300) {
99 FENCE_WARN(fence, "failed to wait on release %d "
100 "after spincount %d\n",
101 fence->context & ~0xf0000000, sc);
102 goto signaled;
103 }
104 goto retry;
105 }
106 /*
107 * yeah, original sync_obj_wait gave up after 3 spins when
108 * have_drawable_releases is not set.
109 */
110
111signaled:
112 cur = jiffies;
113 if (time_after(cur, end))
114 return 0;
115 return end - cur;
116}
117
118static const struct fence_ops qxl_fence_ops = {
119 .get_driver_name = qxl_get_driver_name,
120 .get_timeline_name = qxl_get_timeline_name,
121 .enable_signaling = qxl_nop_signaling,
122 .wait = qxl_fence_wait,
123};
124
42static uint64_t 125static uint64_t
43qxl_release_alloc(struct qxl_device *qdev, int type, 126qxl_release_alloc(struct qxl_device *qdev, int type,
44 struct qxl_release **ret) 127 struct qxl_release **ret)
@@ -46,13 +129,13 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
46 struct qxl_release *release; 129 struct qxl_release *release;
47 int handle; 130 int handle;
48 size_t size = sizeof(*release); 131 size_t size = sizeof(*release);
49 int idr_ret;
50 132
51 release = kmalloc(size, GFP_KERNEL); 133 release = kmalloc(size, GFP_KERNEL);
52 if (!release) { 134 if (!release) {
53 DRM_ERROR("Out of memory\n"); 135 DRM_ERROR("Out of memory\n");
54 return 0; 136 return 0;
55 } 137 }
138 release->base.ops = NULL;
56 release->type = type; 139 release->type = type;
57 release->release_offset = 0; 140 release->release_offset = 0;
58 release->surface_release_id = 0; 141 release->surface_release_id = 0;
@@ -60,44 +143,59 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
60 143
61 idr_preload(GFP_KERNEL); 144 idr_preload(GFP_KERNEL);
62 spin_lock(&qdev->release_idr_lock); 145 spin_lock(&qdev->release_idr_lock);
63 idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT); 146 handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
147 release->base.seqno = ++qdev->release_seqno;
64 spin_unlock(&qdev->release_idr_lock); 148 spin_unlock(&qdev->release_idr_lock);
65 idr_preload_end(); 149 idr_preload_end();
66 handle = idr_ret; 150 if (handle < 0) {
67 if (idr_ret < 0) 151 kfree(release);
68 goto release_fail; 152 *ret = NULL;
153 return handle;
154 }
69 *ret = release; 155 *ret = release;
70 QXL_INFO(qdev, "allocated release %lld\n", handle); 156 QXL_INFO(qdev, "allocated release %lld\n", handle);
71 release->id = handle; 157 release->id = handle;
72release_fail:
73
74 return handle; 158 return handle;
75} 159}
76 160
161static void
162qxl_release_free_list(struct qxl_release *release)
163{
164 while (!list_empty(&release->bos)) {
165 struct ttm_validate_buffer *entry;
166
167 entry = container_of(release->bos.next,
168 struct ttm_validate_buffer, head);
169
170 list_del(&entry->head);
171 kfree(entry);
172 }
173}
174
77void 175void
78qxl_release_free(struct qxl_device *qdev, 176qxl_release_free(struct qxl_device *qdev,
79 struct qxl_release *release) 177 struct qxl_release *release)
80{ 178{
81 struct qxl_bo_list *entry, *tmp;
82 QXL_INFO(qdev, "release %d, type %d\n", release->id, 179 QXL_INFO(qdev, "release %d, type %d\n", release->id,
83 release->type); 180 release->type);
84 181
85 if (release->surface_release_id) 182 if (release->surface_release_id)
86 qxl_surface_id_dealloc(qdev, release->surface_release_id); 183 qxl_surface_id_dealloc(qdev, release->surface_release_id);
87 184
88 list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
89 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
90 QXL_INFO(qdev, "release %llx\n",
91 drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
92 - DRM_FILE_OFFSET);
93 qxl_fence_remove_release(&bo->fence, release->id);
94 qxl_bo_unref(&bo);
95 kfree(entry);
96 }
97 spin_lock(&qdev->release_idr_lock); 185 spin_lock(&qdev->release_idr_lock);
98 idr_remove(&qdev->release_idr, release->id); 186 idr_remove(&qdev->release_idr, release->id);
99 spin_unlock(&qdev->release_idr_lock); 187 spin_unlock(&qdev->release_idr_lock);
100 kfree(release); 188
189 if (release->base.ops) {
190 WARN_ON(list_empty(&release->bos));
191 qxl_release_free_list(release);
192
193 fence_signal(&release->base);
194 fence_put(&release->base);
195 } else {
196 qxl_release_free_list(release);
197 kfree(release);
198 }
101} 199}
102 200
103static int qxl_release_bo_alloc(struct qxl_device *qdev, 201static int qxl_release_bo_alloc(struct qxl_device *qdev,
@@ -142,6 +240,10 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
142 return ret; 240 return ret;
143 } 241 }
144 242
243 ret = reservation_object_reserve_shared(bo->tbo.resv);
244 if (ret)
245 return ret;
246
145 /* allocate a surface for reserved + validated buffers */ 247 /* allocate a surface for reserved + validated buffers */
146 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); 248 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
147 if (ret) 249 if (ret)
@@ -199,6 +301,8 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
199 301
200 /* stash the release after the create command */ 302 /* stash the release after the create command */
201 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 303 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
304 if (idr_ret < 0)
305 return idr_ret;
202 bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); 306 bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
203 307
204 (*release)->release_offset = create_rel->release_offset + 64; 308 (*release)->release_offset = create_rel->release_offset + 64;
@@ -239,6 +343,11 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
239 } 343 }
240 344
241 idr_ret = qxl_release_alloc(qdev, type, release); 345 idr_ret = qxl_release_alloc(qdev, type, release);
346 if (idr_ret < 0) {
347 if (rbo)
348 *rbo = NULL;
349 return idr_ret;
350 }
242 351
243 mutex_lock(&qdev->release_mutex); 352 mutex_lock(&qdev->release_mutex);
244 if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) { 353 if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
@@ -319,12 +428,13 @@ void qxl_release_unmap(struct qxl_device *qdev,
319 428
320void qxl_release_fence_buffer_objects(struct qxl_release *release) 429void qxl_release_fence_buffer_objects(struct qxl_release *release)
321{ 430{
322 struct ttm_validate_buffer *entry;
323 struct ttm_buffer_object *bo; 431 struct ttm_buffer_object *bo;
324 struct ttm_bo_global *glob; 432 struct ttm_bo_global *glob;
325 struct ttm_bo_device *bdev; 433 struct ttm_bo_device *bdev;
326 struct ttm_bo_driver *driver; 434 struct ttm_bo_driver *driver;
327 struct qxl_bo *qbo; 435 struct qxl_bo *qbo;
436 struct ttm_validate_buffer *entry;
437 struct qxl_device *qdev;
328 438
329 /* if only one object on the release its the release itself 439 /* if only one object on the release its the release itself
330 since these objects are pinned no need to reserve */ 440 since these objects are pinned no need to reserve */
@@ -333,23 +443,35 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
333 443
334 bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo; 444 bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
335 bdev = bo->bdev; 445 bdev = bo->bdev;
446 qdev = container_of(bdev, struct qxl_device, mman.bdev);
447
448 /*
449 * Since we never really allocated a context and we don't want to conflict,
450 * set the highest bits. This will break if we really allow exporting of dma-bufs.
451 */
452 fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
453 release->id | 0xf0000000, release->base.seqno);
454 trace_fence_emit(&release->base);
455
336 driver = bdev->driver; 456 driver = bdev->driver;
337 glob = bo->glob; 457 glob = bo->glob;
338 458
339 spin_lock(&glob->lru_lock); 459 spin_lock(&glob->lru_lock);
460 /* acquire release_lock to protect bo->resv->fence and its contents */
461 spin_lock(&qdev->release_lock);
340 462
341 list_for_each_entry(entry, &release->bos, head) { 463 list_for_each_entry(entry, &release->bos, head) {
342 bo = entry->bo; 464 bo = entry->bo;
343 qbo = to_qxl_bo(bo); 465 qbo = to_qxl_bo(bo);
344 466
345 if (!entry->bo->sync_obj) 467 if (!entry->bo->sync_obj)
346 entry->bo->sync_obj = &qbo->fence; 468 entry->bo->sync_obj = qbo;
347
348 qxl_fence_add_release_locked(&qbo->fence, release->id);
349 469
470 reservation_object_add_shared_fence(bo->resv, &release->base);
350 ttm_bo_add_to_lru(bo); 471 ttm_bo_add_to_lru(bo);
351 __ttm_bo_unreserve(bo); 472 __ttm_bo_unreserve(bo);
352 } 473 }
474 spin_unlock(&qdev->release_lock);
353 spin_unlock(&glob->lru_lock); 475 spin_unlock(&glob->lru_lock);
354 ww_acquire_fini(&release->ticket); 476 ww_acquire_fini(&release->ticket);
355} 477}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index f66c59b222f1..29e0a758ee68 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -357,67 +357,67 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
357 return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 357 return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
358} 358}
359 359
360static bool qxl_sync_obj_signaled(void *sync_obj);
360 361
361static int qxl_sync_obj_wait(void *sync_obj, 362static int qxl_sync_obj_wait(void *sync_obj,
362 bool lazy, bool interruptible) 363 bool lazy, bool interruptible)
363{ 364{
364 struct qxl_fence *qfence = (struct qxl_fence *)sync_obj; 365 struct qxl_bo *bo = (struct qxl_bo *)sync_obj;
365 int count = 0, sc = 0; 366 struct qxl_device *qdev = bo->gem_base.dev->dev_private;
366 struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); 367 struct reservation_object_list *fobj;
367 368 int count = 0, sc = 0, num_release = 0;
368 if (qfence->num_active_releases == 0) 369 bool have_drawable_releases;
369 return 0;
370 370
371retry: 371retry:
372 if (sc == 0) { 372 if (sc == 0) {
373 if (bo->type == QXL_GEM_DOMAIN_SURFACE) 373 if (bo->type == QXL_GEM_DOMAIN_SURFACE)
374 qxl_update_surface(qfence->qdev, bo); 374 qxl_update_surface(qdev, bo);
375 } else if (sc >= 1) { 375 } else if (sc >= 1) {
376 qxl_io_notify_oom(qfence->qdev); 376 qxl_io_notify_oom(qdev);
377 } 377 }
378 378
379 sc++; 379 sc++;
380 380
381 for (count = 0; count < 10; count++) { 381 for (count = 0; count < 10; count++) {
382 bool ret; 382 if (qxl_sync_obj_signaled(sync_obj))
383 ret = qxl_queue_garbage_collect(qfence->qdev, true);
384 if (ret == false)
385 break;
386
387 if (qfence->num_active_releases == 0)
388 return 0; 383 return 0;
384
385 if (!qxl_queue_garbage_collect(qdev, true))
386 break;
389 } 387 }
390 388
391 if (qfence->num_active_releases) { 389 have_drawable_releases = false;
392 bool have_drawable_releases = false; 390 num_release = 0;
393 void **slot;
394 struct radix_tree_iter iter;
395 int release_id;
396 391
397 radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) { 392 spin_lock(&qdev->release_lock);
398 struct qxl_release *release; 393 fobj = bo->tbo.resv->fence;
394 for (count = 0; fobj && count < fobj->shared_count; count++) {
395 struct qxl_release *release;
399 396
400 release_id = iter.index; 397 release = container_of(fobj->shared[count],
401 release = qxl_release_from_id_locked(qfence->qdev, release_id); 398 struct qxl_release, base);
402 if (release == NULL)
403 continue;
404 399
405 if (release->type == QXL_RELEASE_DRAWABLE) 400 if (fence_is_signaled(&release->base))
406 have_drawable_releases = true; 401 continue;
407 } 402
403 num_release++;
404
405 if (release->type == QXL_RELEASE_DRAWABLE)
406 have_drawable_releases = true;
407 }
408 spin_unlock(&qdev->release_lock);
409
410 qxl_queue_garbage_collect(qdev, true);
408 411
409 qxl_queue_garbage_collect(qfence->qdev, true); 412 if (have_drawable_releases || sc < 4) {
410 413 if (sc > 2)
411 if (have_drawable_releases || sc < 4) { 414 /* back off */
412 if (sc > 2) 415 usleep_range(500, 1000);
413 /* back off */ 416 if (have_drawable_releases && sc > 300) {
414 usleep_range(500, 1000); 417 WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, num_release);
415 if (have_drawable_releases && sc > 300) { 418 return -EBUSY;
416 WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, qfence->num_active_releases);
417 return -EBUSY;
418 }
419 goto retry;
420 } 419 }
420 goto retry;
421 } 421 }
422 return 0; 422 return 0;
423} 423}
@@ -439,8 +439,21 @@ static void *qxl_sync_obj_ref(void *sync_obj)
439 439
440static bool qxl_sync_obj_signaled(void *sync_obj) 440static bool qxl_sync_obj_signaled(void *sync_obj)
441{ 441{
442 struct qxl_fence *qfence = (struct qxl_fence *)sync_obj; 442 struct qxl_bo *qbo = (struct qxl_bo *)sync_obj;
443 return (qfence->num_active_releases == 0); 443 struct qxl_device *qdev = qbo->gem_base.dev->dev_private;
444 struct reservation_object_list *fobj;
445 bool ret = true;
446 unsigned i;
447
448 spin_lock(&qdev->release_lock);
449 fobj = qbo->tbo.resv->fence;
450 for (i = 0; fobj && i < fobj->shared_count; ++i) {
451 ret = fence_is_signaled(fobj->shared[i]);
452 if (!ret)
453 break;
454 }
455 spin_unlock(&qdev->release_lock);
456 return ret;
444} 457}
445 458
446static void qxl_bo_move_notify(struct ttm_buffer_object *bo, 459static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
@@ -477,8 +490,6 @@ static struct ttm_bo_driver qxl_bo_driver = {
477 .move_notify = &qxl_bo_move_notify, 490 .move_notify = &qxl_bo_move_notify,
478}; 491};
479 492
480
481
482int qxl_ttm_init(struct qxl_device *qdev) 493int qxl_ttm_init(struct qxl_device *qdev)
483{ 494{
484 int r; 495 int r;