Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/qxl/qxl_cmd.c       42
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c   70
-rw-r--r--  drivers/gpu/drm/qxl/qxl_draw.c     263
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h       76
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fb.c       184
-rw-r--r--  drivers/gpu/drm/qxl/qxl_fence.c     10
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c        2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_image.c    111
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c    319
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.c    70
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h     6
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c  212
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c        2
13 files changed, 859 insertions(+), 508 deletions(-)
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 93c2f2cceb51..eb89653a7a17 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
 		  uint32_t type, bool interruptible)
 {
 	struct qxl_command cmd;
+	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
 
 	cmd.type = type;
-	cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+	cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
 
 	return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
 }
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
 		  uint32_t type, bool interruptible)
 {
 	struct qxl_command cmd;
+	struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
 
 	cmd.type = type;
-	cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+	cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
 
 	return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
 }
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev)
 	struct qxl_release *release;
 	uint64_t id, next_id;
 	int i = 0;
-	int ret;
 	union qxl_release_info *info;
 
 	while (qxl_ring_pop(qdev->release_ring, &id)) {
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev)
 		if (release == NULL)
 			break;
 
-		ret = qxl_release_reserve(qdev, release, false);
-		if (ret) {
-			qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
-			DRM_ERROR("failed to reserve release %lld\n", id);
-		}
-
 		info = qxl_release_map(qdev, release);
 		next_id = info->next;
 		qxl_release_unmap(qdev, release, info);
 
-		qxl_release_unreserve(qdev, release);
 		QXL_INFO(qdev, "popped %lld, next %lld\n", id,
 			 next_id);
 
@@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev)
 	return i;
 }
 
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+			  struct qxl_release *release,
+			  unsigned long size,
 			  struct qxl_bo **_bo)
 {
 	struct qxl_bo *bo;
 	int ret;
 
 	ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
-			    QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+			    false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
 	if (ret) {
 		DRM_ERROR("failed to allocate VRAM BO\n");
 		return ret;
 	}
-	ret = qxl_bo_reserve(bo, false);
-	if (unlikely(ret != 0))
+	ret = qxl_release_list_add(release, bo);
+	if (ret)
 		goto out_unref;
 
 	*_bo = bo;
 	return 0;
 out_unref:
 	qxl_bo_unref(&bo);
-	return 0;
+	return ret;
 }
 
 static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
 	if (ret)
 		return ret;
 
+	ret = qxl_release_reserve_list(release, true);
+	if (ret)
+		return ret;
+
 	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_SURFACE_CMD_CREATE;
 	cmd->u.surface_create.format = surf->surf.format;
@@ -524,14 +524,11 @@
 
 	surf->surf_create = release;
 
-	/* no need to add a release to the fence for this bo,
+	/* no need to add a release to the fence for this surface bo,
 	   since it is only released when we ask to destroy the surface
 	   and it would never signal otherwise */
-	qxl_fence_releaseable(qdev, release);
-
 	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
-	qxl_release_unreserve(qdev, release);
+	qxl_release_fence_buffer_objects(release);
 
 	surf->hw_surf_alloc = true;
 	spin_lock(&qdev->surf_id_idr_lock);
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
 	cmd->surface_id = id;
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 
-	qxl_fence_releaseable(qdev, release);
-
 	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 
-	qxl_release_unreserve(qdev, release);
-
+	qxl_release_fence_buffer_objects(release);
 
 	return 0;
 }
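
The qxl_cmd.c changes replace the old per-object qxl_release_reserve()/qxl_release_unreserve() calls with a single reservation pass over the release's new buffer list. A minimal sketch of the resulting submission sequence, modeled directly on the reworked qxl_hide_cursor() in qxl_display.c below (the example_push_cursor_cmd() wrapper itself is hypothetical; only the call sequence is from the patch):

static int example_push_cursor_cmd(struct qxl_device *qdev)
{
	struct qxl_cursor_cmd *cmd;
	struct qxl_release *release;
	int ret;

	/* allocate the release; its own bo becomes the first entry on release->bos */
	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
					 QXL_RELEASE_CURSOR_CMD, &release, NULL);
	if (ret)
		return ret;

	/* one ww_mutex reservation pass over every bo attached to the release */
	ret = qxl_release_reserve_list(release, true);
	if (ret) {
		qxl_release_free(qdev, release);
		return ret;
	}

	/* fill the command while the buffers are reserved */
	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_CURSOR_HIDE;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	/* push, then fence the whole buffer list and drop the reservations */
	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
	qxl_release_fence_buffer_objects(release);
	return 0;
}
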
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index f76f5dd7bfc4..835caba026d3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
 	kfree(qxl_crtc);
 }
 
-static void
+static int
 qxl_hide_cursor(struct qxl_device *qdev)
 {
 	struct qxl_release *release;
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev)
 
 	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
 					 &release, NULL);
+	if (ret)
+		return ret;
+
+	ret = qxl_release_reserve_list(release, true);
+	if (ret) {
+		qxl_release_free(qdev, release);
+		return ret;
+	}
 
 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_CURSOR_HIDE;
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 
-	qxl_fence_releaseable(qdev, release);
 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
-	qxl_release_unreserve(qdev, release);
+	qxl_release_fence_buffer_objects(release);
+	return 0;
 }
 
 static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	int size = 64*64*4;
 	int ret = 0;
-	if (!handle) {
-		qxl_hide_cursor(qdev);
-		return 0;
-	}
+	if (!handle)
+		return qxl_hide_cursor(qdev);
 
 	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
 	if (!obj) {
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
 		goto out_unref;
 
 	ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+	qxl_bo_unreserve(user_bo);
 	if (ret)
-		goto out_unreserve;
+		goto out_unref;
 
 	ret = qxl_bo_kmap(user_bo, &user_ptr);
 	if (ret)
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
 					 &release, NULL);
 	if (ret)
 		goto out_kunmap;
-	ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
-				    &cursor_bo);
+
+	ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size,
+				    &cursor_bo);
 	if (ret)
 		goto out_free_release;
-	ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+
+	ret = qxl_release_reserve_list(release, false);
 	if (ret)
 		goto out_free_bo;
 
+	ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+	if (ret)
+		goto out_backoff;
+
 	cursor->header.unique = 0;
 	cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
 	cursor->header.width = 64;
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
 
 	qxl_bo_kunmap(cursor_bo);
 
-	/* finish with the userspace bo */
 	qxl_bo_kunmap(user_bo);
-	qxl_bo_unpin(user_bo);
-	qxl_bo_unreserve(user_bo);
-	drm_gem_object_unreference_unlocked(obj);
 
 	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_CURSOR_SET;
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
 	cmd->u.set.position.y = qcrtc->cur_y;
 
 	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
-	qxl_release_add_res(qdev, release, cursor_bo);
 
 	cmd->u.set.visible = 1;
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 
-	qxl_fence_releaseable(qdev, release);
 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
-	qxl_release_unreserve(qdev, release);
+	qxl_release_fence_buffer_objects(release);
+
+	/* finish with the userspace bo */
+	ret = qxl_bo_reserve(user_bo, false);
+	if (!ret) {
+		qxl_bo_unpin(user_bo);
+		qxl_bo_unreserve(user_bo);
+	}
+	drm_gem_object_unreference_unlocked(obj);
 
-	qxl_bo_unreserve(cursor_bo);
 	qxl_bo_unref(&cursor_bo);
 
 	return ret;
+
+out_backoff:
+	qxl_release_backoff_reserve_list(release);
 out_free_bo:
 	qxl_bo_unref(&cursor_bo);
 out_free_release:
-	qxl_release_unreserve(qdev, release);
 	qxl_release_free(qdev, release);
 out_kunmap:
 	qxl_bo_kunmap(user_bo);
 out_unpin:
 	qxl_bo_unpin(user_bo);
-out_unreserve:
-	qxl_bo_unreserve(user_bo);
 out_unref:
 	drm_gem_object_unreference_unlocked(obj);
 	return ret;
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
 
 	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
 					 &release, NULL);
+	if (ret)
+		return ret;
+
+	ret = qxl_release_reserve_list(release, true);
+	if (ret) {
+		qxl_release_free(qdev, release);
+		return ret;
+	}
 
 	qcrtc->cur_x = x;
 	qcrtc->cur_y = y;
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
 	cmd->u.position.y = qcrtc->cur_y;
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 
-	qxl_fence_releaseable(qdev, release);
 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
-	qxl_release_unreserve(qdev, release);
+	qxl_release_fence_buffer_objects(release);
+
 	return 0;
 }
 
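
Note that qxl_crtc_cursor_set2() no longer unpins the userspace bo while it is unreserved: TTM expects pin-state changes to happen with the bo reserved, so the success path now re-reserves it first. The shape of that cleanup, lifted from the hunk above (the wrapper name is hypothetical):

static void example_finish_user_bo(struct qxl_bo *user_bo)
{
	int ret;

	/* TTM convention: pin/unpin only while the bo is reserved */
	ret = qxl_bo_reserve(user_bo, false);
	if (!ret) {
		qxl_bo_unpin(user_bo);
		qxl_bo_unreserve(user_bo);
	}
}
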
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 3c8c3dbf9378..56e1d633875e 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -23,25 +23,29 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 
+static int alloc_clips(struct qxl_device *qdev,
+		       struct qxl_release *release,
+		       unsigned num_clips,
+		       struct qxl_bo **clips_bo)
+{
+	int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
+
+	return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
+}
+
 /* returns a pointer to the already allocated qxl_rect array inside
  * the qxl_clip_rects. This is *not* the same as the memory allocated
  * on the device, it is offset to qxl_clip_rects.chunk.data */
 static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
 					      struct qxl_drawable *drawable,
 					      unsigned num_clips,
-					      struct qxl_bo **clips_bo,
-					      struct qxl_release *release)
+					      struct qxl_bo *clips_bo)
 {
 	struct qxl_clip_rects *dev_clips;
 	int ret;
-	int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
-	ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
-	if (ret)
-		return NULL;
 
-	ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
+	ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips);
 	if (ret) {
-		qxl_bo_unref(clips_bo);
 		return NULL;
 	}
 	dev_clips->num_rects = num_clips;
@@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
 }
 
 static int
+alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
+{
+	int ret;
+	ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+					 QXL_RELEASE_DRAWABLE, release,
+					 NULL);
+	return ret;
+}
+
+static void
+free_drawable(struct qxl_device *qdev, struct qxl_release *release)
+{
+	qxl_release_free(qdev, release);
+}
+
+/* release needs to be reserved at this point */
+static int
 make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
 	      const struct qxl_rect *rect,
-	      struct qxl_release **release)
+	      struct qxl_release *release)
 {
 	struct qxl_drawable *drawable;
-	int i, ret;
+	int i;
 
-	ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
-					 QXL_RELEASE_DRAWABLE, release,
-					 NULL);
-	if (ret)
-		return ret;
+	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+	if (!drawable)
+		return -ENOMEM;
 
-	drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
 	drawable->type = type;
 
 	drawable->surface_id = surface;		/* Only primary for now */
@@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
 	drawable->bbox = *rect;
 
 	drawable->mm_time = qdev->rom->mm_clock;
-	qxl_release_unmap(qdev, *release, &drawable->release_info);
+	qxl_release_unmap(qdev, release, &drawable->release_info);
 	return 0;
 }
 
-static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
+static int alloc_palette_object(struct qxl_device *qdev,
+				struct qxl_release *release,
+				struct qxl_bo **palette_bo)
+{
+	return qxl_alloc_bo_reserved(qdev, release,
+				     sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
+				     palette_bo);
+}
+
+static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
+				   struct qxl_release *release,
 				   const struct qxl_fb_image *qxl_fb_image)
 {
-	struct qxl_device *qdev = qxl_fb_image->qdev;
 	const struct fb_image *fb_image = &qxl_fb_image->fb_image;
 	uint32_t visual = qxl_fb_image->visual;
 	const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
@@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
 	static uint64_t unique; /* we make no attempt to actually set this
 				 * correctly globaly, since that would require
				 * tracking all of our palettes. */
-
-	ret = qxl_alloc_bo_reserved(qdev,
-				    sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
-				    palette_bo);
-
-	ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
+	ret = qxl_bo_kmap(palette_bo, (void **)&pal);
 	pal->num_ents = 2;
 	pal->unique = unique++;
 	if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
 	}
 	pal->ents[0] = bgcolor;
 	pal->ents[1] = fgcolor;
-	qxl_bo_kunmap(*palette_bo);
+	qxl_bo_kunmap(palette_bo);
 	return 0;
 }
 
@@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
 	const char *src = fb_image->data;
 	int depth = fb_image->depth;
 	struct qxl_release *release;
-	struct qxl_bo *image_bo;
 	struct qxl_image *image;
 	int ret;
-
+	struct qxl_drm_image *dimage;
+	struct qxl_bo *palette_bo = NULL;
 	if (stride == 0)
 		stride = depth * width / 8;
 
+	ret = alloc_drawable(qdev, &release);
+	if (ret)
+		return;
+
+	ret = qxl_image_alloc_objects(qdev, release,
+				      &dimage,
+				      height, stride);
+	if (ret)
+		goto out_free_drawable;
+
+	if (depth == 1) {
+		ret = alloc_palette_object(qdev, release, &palette_bo);
+		if (ret)
+			goto out_free_image;
+	}
+
+	/* do a reservation run over all the objects we just allocated */
+	ret = qxl_release_reserve_list(release, true);
+	if (ret)
+		goto out_free_palette;
+
 	rect.left = x;
 	rect.right = x + width;
 	rect.top = y;
 	rect.bottom = y + height;
 
-	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
-	if (ret)
-		return;
+	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release);
+	if (ret) {
+		qxl_release_backoff_reserve_list(release);
+		goto out_free_palette;
+	}
 
-	ret = qxl_image_create(qdev, release, &image_bo,
-			       (const uint8_t *)src, 0, 0,
-			       width, height, depth, stride);
+	ret = qxl_image_init(qdev, release, dimage,
			     (const uint8_t *)src, 0, 0,
			     width, height, depth, stride);
 	if (ret) {
-		qxl_release_unreserve(qdev, release);
+		qxl_release_backoff_reserve_list(release);
 		qxl_release_free(qdev, release);
 		return;
 	}
 
 	if (depth == 1) {
-		struct qxl_bo *palette_bo;
 		void *ptr;
-		ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
-		qxl_release_add_res(qdev, release, palette_bo);
+		ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
 
-		ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
+		ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
 		image = ptr;
 		image->u.bitmap.palette =
 			qxl_bo_physical_address(qdev, palette_bo, 0);
-		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
-		qxl_bo_unreserve(palette_bo);
-		qxl_bo_unref(&palette_bo);
+		qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr);
 	}
 
 	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
@@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
 	drawable->u.copy.mask.bitmap = 0;
 
 	drawable->u.copy.src_bitmap =
-		qxl_bo_physical_address(qdev, image_bo, 0);
+		qxl_bo_physical_address(qdev, dimage->bo, 0);
 	qxl_release_unmap(qdev, release, &drawable->release_info);
 
-	qxl_release_add_res(qdev, release, image_bo);
-	qxl_bo_unreserve(image_bo);
-	qxl_bo_unref(&image_bo);
-
-	qxl_fence_releaseable(qdev, release);
 	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
-	qxl_release_unreserve(qdev, release);
+	qxl_release_fence_buffer_objects(release);
+
+out_free_palette:
+	if (palette_bo)
+		qxl_bo_unref(&palette_bo);
+out_free_image:
+	qxl_image_free_objects(qdev, dimage);
+out_free_drawable:
+	if (ret)
+		free_drawable(qdev, release);
 }
 
 /* push a draw command using the given clipping rectangles as
@@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 	int depth = qxl_fb->base.bits_per_pixel;
 	uint8_t *surface_base;
 	struct qxl_release *release;
-	struct qxl_bo *image_bo;
 	struct qxl_bo *clips_bo;
+	struct qxl_drm_image *dimage;
 	int ret;
 
+	ret = alloc_drawable(qdev, &release);
+	if (ret)
+		return;
+
 	left = clips->x1;
 	right = clips->x2;
 	top = clips->y1;
@@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 
 	width = right - left;
 	height = bottom - top;
+
+	ret = alloc_clips(qdev, release, num_clips, &clips_bo);
+	if (ret)
+		goto out_free_drawable;
+
+	ret = qxl_image_alloc_objects(qdev, release,
+				      &dimage,
+				      height, stride);
+	if (ret)
+		goto out_free_clips;
+
+	/* do a reservation run over all the objects we just allocated */
+	ret = qxl_release_reserve_list(release, true);
+	if (ret)
+		goto out_free_image;
+
 	drawable_rect.left = left;
 	drawable_rect.right = right;
 	drawable_rect.top = top;
 	drawable_rect.bottom = bottom;
+
 	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
-			    &release);
+			    release);
 	if (ret)
-		return;
+		goto out_release_backoff;
 
 	ret = qxl_bo_kmap(bo, (void **)&surface_base);
 	if (ret)
-		goto out_unref;
+		goto out_release_backoff;
 
-	ret = qxl_image_create(qdev, release, &image_bo, surface_base,
-			       left, top, width, height, depth, stride);
+
+	ret = qxl_image_init(qdev, release, dimage, surface_base,
+			     left, top, width, height, depth, stride);
 	qxl_bo_kunmap(bo);
 	if (ret)
-		goto out_unref;
+		goto out_release_backoff;
+
+	rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+	if (!rects)
+		goto out_release_backoff;
 
-	rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
-	if (!rects) {
-		qxl_bo_unref(&image_bo);
-		goto out_unref;
-	}
 	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
 
 	drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
 	drawable->clip.data = qxl_bo_physical_address(qdev,
 						      clips_bo, 0);
-	qxl_release_add_res(qdev, release, clips_bo);
 
 	drawable->u.copy.src_area.top = 0;
 	drawable->u.copy.src_area.bottom = height;
@@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 	drawable->u.copy.mask.pos.y = 0;
 	drawable->u.copy.mask.bitmap = 0;
 
-	drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
+	drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
 	qxl_release_unmap(qdev, release, &drawable->release_info);
-	qxl_release_add_res(qdev, release, image_bo);
-	qxl_bo_unreserve(image_bo);
-	qxl_bo_unref(&image_bo);
+
 	clips_ptr = clips;
 	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
 		rects[i].left = clips_ptr->x1;
@@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 		rects[i].bottom = clips_ptr->y2;
 	}
 	qxl_bo_kunmap(clips_bo);
-	qxl_bo_unreserve(clips_bo);
-	qxl_bo_unref(&clips_bo);
 
-	qxl_fence_releaseable(qdev, release);
 	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
-	qxl_release_unreserve(qdev, release);
-	return;
+	qxl_release_fence_buffer_objects(release);
+
+out_release_backoff:
+	if (ret)
+		qxl_release_backoff_reserve_list(release);
+out_free_image:
+	qxl_image_free_objects(qdev, dimage);
+out_free_clips:
+	qxl_bo_unref(&clips_bo);
+out_free_drawable:
+	/* only free drawable on error */
+	if (ret)
+		free_drawable(qdev, release);
 
-out_unref:
-	qxl_release_unreserve(qdev, release);
-	qxl_release_free(qdev, release);
 }
 
 void qxl_draw_copyarea(struct qxl_device *qdev,
@@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
 	struct qxl_release *release;
 	int ret;
 
+	ret = alloc_drawable(qdev, &release);
+	if (ret)
+		return;
+
+	/* do a reservation run over all the objects we just allocated */
+	ret = qxl_release_reserve_list(release, true);
+	if (ret)
+		goto out_free_release;
+
 	rect.left = dx;
 	rect.top = dy;
 	rect.right = dx + width;
 	rect.bottom = dy + height;
-	ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
-	if (ret)
-		return;
+	ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release);
+	if (ret) {
+		qxl_release_backoff_reserve_list(release);
+		goto out_free_release;
+	}
 
 	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
 	drawable->u.copy_bits.src_pos.x = sx;
 	drawable->u.copy_bits.src_pos.y = sy;
-
 	qxl_release_unmap(qdev, release, &drawable->release_info);
-	qxl_fence_releaseable(qdev, release);
+
 	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
-	qxl_release_unreserve(qdev, release);
+	qxl_release_fence_buffer_objects(release);
+
+out_free_release:
+	if (ret)
+		free_drawable(qdev, release);
 }
 
 void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
@@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
 	struct qxl_release *release;
 	int ret;
 
-	ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
+	ret = alloc_drawable(qdev, &release);
 	if (ret)
 		return;
 
+	/* do a reservation run over all the objects we just allocated */
+	ret = qxl_release_reserve_list(release, true);
+	if (ret)
+		goto out_free_release;
+
+	ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release);
+	if (ret) {
+		qxl_release_backoff_reserve_list(release);
+		goto out_free_release;
+	}
+
 	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
 	drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
 	drawable->u.fill.brush.u.color = color;
@@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
 	drawable->u.fill.mask.bitmap = 0;
 
 	qxl_release_unmap(qdev, release, &drawable->release_info);
-	qxl_fence_releaseable(qdev, release);
+
 	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
-	qxl_release_unreserve(qdev, release);
+	qxl_release_fence_buffer_objects(release);
+
+out_free_release:
+	if (ret)
+		free_drawable(qdev, release);
 }
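
Every draw path now follows the same shape: allocate the drawable release and all backing objects first, make a single qxl_release_reserve_list() pass, back the reservation off if a later step fails, and free the release only on error (on success the ring owns it). A condensed skeleton of that pattern, following qxl_draw_fill() above (the example function is illustrative only; the map-and-fill step is elided):

static void example_draw(struct qxl_device *qdev, const struct qxl_rect *rect)
{
	struct qxl_release *release;
	int ret;

	ret = alloc_drawable(qdev, &release);		/* allocate everything first */
	if (ret)
		return;

	ret = qxl_release_reserve_list(release, true);	/* then reserve all bos at once */
	if (ret)
		goto out_free_release;

	ret = make_drawable(qdev, 0, QXL_DRAW_FILL, rect, release);
	if (ret) {
		qxl_release_backoff_reserve_list(release);	/* undo the reservations */
		goto out_free_release;
	}

	/* ... qxl_release_map(), fill the drawable, qxl_release_unmap() ... */

	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
	qxl_release_fence_buffer_objects(release);	/* fences and unreserves */

out_free_release:
	if (ret)
		free_drawable(qdev, release);
}
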
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index aacb791464a3..7e96f4f11738 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -42,6 +42,9 @@
 #include <ttm/ttm_placement.h>
 #include <ttm/ttm_module.h>
 
+/* just for ttm_validate_buffer */
+#include <ttm/ttm_execbuf_util.h>
+
 #include <drm/qxl_drm.h>
 #include "qxl_dev.h"
 
@@ -118,9 +121,9 @@ struct qxl_bo {
 	uint32_t surface_id;
 	struct qxl_fence fence; /* per bo fence - list of releases */
 	struct qxl_release *surf_create;
-	atomic_t reserve_count;
 };
 #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
 
 struct qxl_gem {
 	struct mutex mutex;
@@ -128,12 +131,7 @@ struct qxl_gem {
 };
 
 struct qxl_bo_list {
-	struct list_head lhead;
-	struct qxl_bo *bo;
-};
-
-struct qxl_reloc_list {
-	struct list_head bos;
+	struct ttm_validate_buffer tv;
 };
 
 struct qxl_crtc {
@@ -195,10 +193,20 @@ enum {
 struct qxl_release {
 	int id;
 	int type;
-	int bo_count;
 	uint32_t release_offset;
 	uint32_t surface_release_id;
-	struct qxl_bo *bos[QXL_MAX_RES];
+	struct ww_acquire_ctx ticket;
+	struct list_head bos;
+};
+
+struct qxl_drm_chunk {
+	struct list_head head;
+	struct qxl_bo *bo;
+};
+
+struct qxl_drm_image {
+	struct qxl_bo *bo;
+	struct list_head chunk_list;
 };
 
 struct qxl_fb_image {
@@ -314,6 +322,7 @@ struct qxl_device {
 	struct workqueue_struct *gc_queue;
 	struct work_struct gc_work;
 
+	struct work_struct fb_work;
 };
 
 /* forward declaration for QXL_INFO_IO */
@@ -433,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
 
 /* qxl image */
 
-int qxl_image_create(struct qxl_device *qdev,
-		     struct qxl_release *release,
-		     struct qxl_bo **image_bo,
-		     const uint8_t *data,
-		     int x, int y, int width, int height,
-		     int depth, int stride);
+int qxl_image_init(struct qxl_device *qdev,
+		   struct qxl_release *release,
+		   struct qxl_drm_image *dimage,
+		   const uint8_t *data,
+		   int x, int y, int width, int height,
+		   int depth, int stride);
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
+			struct qxl_release *release,
+			struct qxl_drm_image **image_ptr,
+			int height, int stride);
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
+
 void qxl_update_screen(struct qxl_device *qxl);
 
 /* qxl io operations (qxl_cmd.c) */
@@ -459,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible);
 void qxl_io_flush_release(struct qxl_device *qdev);
 void qxl_io_flush_surfaces(struct qxl_device *qdev);
 
-int qxl_release_reserve(struct qxl_device *qdev,
-			struct qxl_release *release, bool no_wait);
-void qxl_release_unreserve(struct qxl_device *qdev,
-			   struct qxl_release *release);
 union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
 					struct qxl_release *release);
 void qxl_release_unmap(struct qxl_device *qdev,
 		       struct qxl_release *release,
 		       union qxl_release_info *info);
-/*
- * qxl_bo_add_resource.
- *
- */
-void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
+void qxl_release_backoff_reserve_list(struct qxl_release *release);
+void qxl_release_fence_buffer_objects(struct qxl_release *release);
 
 int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
 				       enum qxl_surface_cmd_type surface_cmd_type,
@@ -481,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
 int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
 			       int type, struct qxl_release **release,
 			       struct qxl_bo **rbo);
-int qxl_fence_releaseable(struct qxl_device *qdev,
-			  struct qxl_release *release);
+
 int
 qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
 			      uint32_t type, bool interruptible);
 int
 qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
 			     uint32_t type, bool interruptible);
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+			  struct qxl_release *release,
+			  unsigned long size,
 			  struct qxl_bo **_bo);
 /* qxl drawing commands */
 
@@ -510,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
 		       u32 sx, u32 sy,
 		       u32 dx, u32 dy);
 
-uint64_t
-qxl_release_alloc(struct qxl_device *qdev, int type,
-		  struct qxl_release **ret);
-
 void qxl_release_free(struct qxl_device *qdev,
 		      struct qxl_release *release);
-void qxl_release_add_res(struct qxl_device *qdev,
-			 struct qxl_release *release,
-			 struct qxl_bo *bo);
+
 /* used by qxl_debugfs_release */
 struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
 					       uint64_t id);
@@ -561,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein
 int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
 
 /* qxl_fence.c */
-int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
 int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
 int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
 void qxl_fence_fini(struct qxl_fence *qfence);
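
struct qxl_release now carries a ww_acquire_ctx ticket plus a list of qxl_bo_list entries, each just wrapping a ttm_validate_buffer, so TTM's generic execbuf-util reservation helpers can operate on the whole set. Getting from a list entry back to the driver bo goes through the embedded ttm_buffer_object, as the qxl_cmd.c hunks above do; a small sketch (the helper name is hypothetical):

static struct qxl_bo *example_first_bo(struct qxl_release *release)
{
	struct qxl_bo_list *entry =
		list_first_entry(&release->bos, struct qxl_bo_list, tv.head);

	/* entry->tv.bo is the ttm_buffer_object inside the validate entry;
	 * to_qxl_bo() is the new container_of() wrapper over qxl_bo.tbo */
	return to_qxl_bo(entry->tv.bo);
}
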
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 76f39d88d684..88722f233430 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -37,12 +37,29 @@
 
 #define QXL_DIRTY_DELAY (HZ / 30)
 
+#define QXL_FB_OP_FILLRECT 1
+#define QXL_FB_OP_COPYAREA 2
+#define QXL_FB_OP_IMAGEBLIT 3
+
+struct qxl_fb_op {
+	struct list_head head;
+	int op_type;
+	union {
+		struct fb_fillrect fr;
+		struct fb_copyarea ca;
+		struct fb_image ib;
+	} op;
+	void *img_data;
+};
+
 struct qxl_fbdev {
 	struct drm_fb_helper helper;
 	struct qxl_framebuffer qfb;
 	struct list_head fbdev_list;
 	struct qxl_device *qdev;
 
+	spinlock_t delayed_ops_lock;
+	struct list_head delayed_ops;
 	void *shadow;
 	int size;
 
@@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = {
 	.deferred_io = qxl_deferred_io,
 };
 
-static void qxl_fb_fillrect(struct fb_info *info,
-			    const struct fb_fillrect *fb_rect)
+static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
+				    const struct fb_fillrect *fb_rect)
+{
+	struct qxl_fb_op *op;
+	unsigned long flags;
+
+	op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
+	if (!op)
+		return;
+
+	op->op.fr = *fb_rect;
+	op->img_data = NULL;
+	op->op_type = QXL_FB_OP_FILLRECT;
+
+	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+	list_add_tail(&op->head, &qfbdev->delayed_ops);
+	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
+				    const struct fb_copyarea *fb_copy)
+{
+	struct qxl_fb_op *op;
+	unsigned long flags;
+
+	op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
+	if (!op)
+		return;
+
+	op->op.ca = *fb_copy;
+	op->img_data = NULL;
+	op->op_type = QXL_FB_OP_COPYAREA;
+
+	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+	list_add_tail(&op->head, &qfbdev->delayed_ops);
+	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
+				     const struct fb_image *fb_image)
+{
+	struct qxl_fb_op *op;
+	unsigned long flags;
+	uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
+
+	op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
+	if (!op)
+		return;
+
+	op->op.ib = *fb_image;
+	op->img_data = (void *)(op + 1);
+	op->op_type = QXL_FB_OP_IMAGEBLIT;
+
+	memcpy(op->img_data, fb_image->data, size);
+
+	op->op.ib.data = op->img_data;
+	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+	list_add_tail(&op->head, &qfbdev->delayed_ops);
+	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_fillrect_internal(struct fb_info *info,
+				     const struct fb_fillrect *fb_rect)
 {
 	struct qxl_fbdev *qfbdev = info->par;
 	struct qxl_device *qdev = qfbdev->qdev;
@@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info,
 	qxl_draw_fill_rec.rect = rect;
 	qxl_draw_fill_rec.color = color;
 	qxl_draw_fill_rec.rop = rop;
+
+	qxl_draw_fill(&qxl_draw_fill_rec);
+}
+
+static void qxl_fb_fillrect(struct fb_info *info,
+			    const struct fb_fillrect *fb_rect)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_device *qdev = qfbdev->qdev;
+
 	if (!drm_can_sleep()) {
-		qxl_io_log(qdev,
-			"%s: TODO use RCU, mysterious locks with spin_lock\n",
-			__func__);
+		qxl_fb_delayed_fillrect(qfbdev, fb_rect);
+		schedule_work(&qdev->fb_work);
 		return;
 	}
-	qxl_draw_fill(&qxl_draw_fill_rec);
+	/* make sure any previous work is done */
+	flush_work(&qdev->fb_work);
+	qxl_fb_fillrect_internal(info, fb_rect);
 }
 
-static void qxl_fb_copyarea(struct fb_info *info,
-			    const struct fb_copyarea *region)
+static void qxl_fb_copyarea_internal(struct fb_info *info,
+				     const struct fb_copyarea *region)
 {
 	struct qxl_fbdev *qfbdev = info->par;
 
@@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info,
 			  region->dx, region->dy);
 }
 
+static void qxl_fb_copyarea(struct fb_info *info,
+			    const struct fb_copyarea *region)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_device *qdev = qfbdev->qdev;
+
+	if (!drm_can_sleep()) {
+		qxl_fb_delayed_copyarea(qfbdev, region);
+		schedule_work(&qdev->fb_work);
+		return;
+	}
+	/* make sure any previous work is done */
+	flush_work(&qdev->fb_work);
+	qxl_fb_copyarea_internal(info, region);
+}
+
 static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
 {
 	qxl_draw_opaque_fb(qxl_fb_image, 0);
 }
 
+static void qxl_fb_imageblit_internal(struct fb_info *info,
+				      const struct fb_image *image)
+{
+	struct qxl_fbdev *qfbdev = info->par;
+	struct qxl_fb_image qxl_fb_image;
+
+	/* ensure proper order of rendering operations - TODO: must do this
+	 * for everything. */
+	qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
+	qxl_fb_imageblit_safe(&qxl_fb_image);
+}
+
 static void qxl_fb_imageblit(struct fb_info *info,
 			     const struct fb_image *image)
 {
 	struct qxl_fbdev *qfbdev = info->par;
 	struct qxl_device *qdev = qfbdev->qdev;
-	struct qxl_fb_image qxl_fb_image;
 
 	if (!drm_can_sleep()) {
-		/* we cannot do any ttm_bo allocation since that will fail on
-		 * ioremap_wc..__get_vm_area_node, so queue the work item
-		 * instead This can happen from printk inside an interrupt
-		 * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
-		qxl_io_log(qdev,
-			"%s: TODO use RCU, mysterious locks with spin_lock\n",
-			__func__);
+		qxl_fb_delayed_imageblit(qfbdev, image);
+		schedule_work(&qdev->fb_work);
 		return;
 	}
+	/* make sure any previous work is done */
+	flush_work(&qdev->fb_work);
+	qxl_fb_imageblit_internal(info, image);
+}
 
-	/* ensure proper order of rendering operations - TODO: must do this
-	 * for everything. */
-	qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
-	qxl_fb_imageblit_safe(&qxl_fb_image);
+static void qxl_fb_work(struct work_struct *work)
+{
+	struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
+	unsigned long flags;
+	struct qxl_fb_op *entry, *tmp;
+	struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
+
+	/* since the irq context just adds entries to the end of the
+	   list dropping the lock should be fine, as entry isn't modified
+	   in the operation code */
+	spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+	list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
+		spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+		switch (entry->op_type) {
+		case QXL_FB_OP_FILLRECT:
+			qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
+			break;
+		case QXL_FB_OP_COPYAREA:
+			qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
+			break;
+		case QXL_FB_OP_IMAGEBLIT:
+			qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
+			break;
+		}
+		spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+		list_del(&entry->head);
+		kfree(entry);
+	}
+	spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
 }
 
 int qxl_fb_init(struct qxl_device *qdev)
 {
+	INIT_WORK(&qdev->fb_work, qxl_fb_work);
 	return 0;
 }
 
@@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev)
 	qfbdev->qdev = qdev;
 	qdev->mode_info.qfbdev = qfbdev;
 	qfbdev->helper.funcs = &qxl_fb_helper_funcs;
-
+	spin_lock_init(&qfbdev->delayed_ops_lock);
+	INIT_LIST_HEAD(&qfbdev->delayed_ops);
 	ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
 				 qxl_num_crtc /* num_crtc - QXL supports just 1 */,
 				 QXLFB_CONN_LIMIT);
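
The fbdev entry points can be reached from atomic context (for example a printk from an interrupt), where the draw paths' bo allocation and reservation cannot sleep. Instead of logging and dropping the operation as before, each op is now copied onto a spinlock-protected list and replayed from the fb_work worker. The decision each entry point makes, restated (illustrative wrapper; the body is exactly the new qxl_fb_fillrect() logic above):

static void example_fb_op(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;

	if (!drm_can_sleep()) {
		/* atomic context: queue a private copy for qxl_fb_work() */
		qxl_fb_delayed_fillrect(qfbdev, rect);
		schedule_work(&qdev->fb_work);
		return;
	}
	/* process context: drain queued ops first so ordering is preserved */
	flush_work(&qdev->fb_work);
	qxl_fb_fillrect_internal(info, rect);
}
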
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
index 63c6715ad385..ae59e91cfb9a 100644
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -49,17 +49,11 @@
 
    For some reason every so often qxl hw fails to release, things go wrong.
 */
-
-
-int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
+/* must be called with the fence lock held */
+void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
 {
-	struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
-
-	spin_lock(&bo->tbo.bdev->fence_lock);
 	radix_tree_insert(&qfence->tree, rel_id, qfence);
 	qfence->num_active_releases++;
-	spin_unlock(&bo->tbo.bdev->fence_lock);
-	return 0;
 }
 
 int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
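
qxl_fence_add_release() used to take the TTM fence lock itself; the renamed qxl_fence_add_release_locked() moves that duty to the caller, so a batch of releases can be added under a single lock hold. A sketch of the expected calling convention (the wrapper is hypothetical, but the lock is the same bo->tbo.bdev->fence_lock the removed lines used):

static void example_add_release(struct qxl_bo *bo, uint32_t rel_id)
{
	spin_lock(&bo->tbo.bdev->fence_lock);
	qxl_fence_add_release_locked(&bo->fence, rel_id);
	spin_unlock(&bo->tbo.bdev->fence_lock);
}
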
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a235693aabba..25e1777fb0a2 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
 	/* At least align on page size */
 	if (alignment < PAGE_SIZE)
 		alignment = PAGE_SIZE;
-	r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
+	r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
 	if (r) {
 		if (r != -ERESTARTSYS)
 			DRM_ERROR(
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index cf856206996b..7fbcc35e8ad3 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -30,31 +30,100 @@
30#include "qxl_object.h" 30#include "qxl_object.h"
31 31
32static int 32static int
33qxl_image_create_helper(struct qxl_device *qdev, 33qxl_allocate_chunk(struct qxl_device *qdev,
34 struct qxl_release *release,
35 struct qxl_drm_image *image,
36 unsigned int chunk_size)
37{
38 struct qxl_drm_chunk *chunk;
39 int ret;
40
41 chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
42 if (!chunk)
43 return -ENOMEM;
44
45 ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
46 if (ret) {
47 kfree(chunk);
48 return ret;
49 }
50
51 list_add_tail(&chunk->head, &image->chunk_list);
52 return 0;
53}
54
55int
56qxl_image_alloc_objects(struct qxl_device *qdev,
34 struct qxl_release *release, 57 struct qxl_release *release,
35 struct qxl_bo **image_bo, 58 struct qxl_drm_image **image_ptr,
36 const uint8_t *data, 59 int height, int stride)
37 int width, int height, 60{
38 int depth, unsigned int hash, 61 struct qxl_drm_image *image;
39 int stride) 62 int ret;
63
64 image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
65 if (!image)
66 return -ENOMEM;
67
68 INIT_LIST_HEAD(&image->chunk_list);
69
70 ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
71 if (ret) {
72 kfree(image);
73 return ret;
74 }
75
76 ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
77 if (ret) {
78 qxl_bo_unref(&image->bo);
79 kfree(image);
80 return ret;
81 }
82 *image_ptr = image;
83 return 0;
84}
85
86void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
40{ 87{
88 struct qxl_drm_chunk *chunk, *tmp;
89
90 list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
91 qxl_bo_unref(&chunk->bo);
92 kfree(chunk);
93 }
94
95 qxl_bo_unref(&dimage->bo);
96 kfree(dimage);
97}
98
99static int
100qxl_image_init_helper(struct qxl_device *qdev,
101 struct qxl_release *release,
102 struct qxl_drm_image *dimage,
103 const uint8_t *data,
104 int width, int height,
105 int depth, unsigned int hash,
106 int stride)
107{
108 struct qxl_drm_chunk *drv_chunk;
41 struct qxl_image *image; 109 struct qxl_image *image;
42 struct qxl_data_chunk *chunk; 110 struct qxl_data_chunk *chunk;
43 int i; 111 int i;
44 int chunk_stride; 112 int chunk_stride;
45 int linesize = width * depth / 8; 113 int linesize = width * depth / 8;
46 struct qxl_bo *chunk_bo; 114 struct qxl_bo *chunk_bo, *image_bo;
47 int ret;
48 void *ptr; 115 void *ptr;
49 /* Chunk */ 116 /* Chunk */
50 /* FIXME: Check integer overflow */ 117 /* FIXME: Check integer overflow */
51 /* TODO: variable number of chunks */ 118 /* TODO: variable number of chunks */
119
120 drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
121
122 chunk_bo = drv_chunk->bo;
52 chunk_stride = stride; /* TODO: should use linesize, but it renders 123 chunk_stride = stride; /* TODO: should use linesize, but it renders
53 wrong (check the bitmaps are sent correctly 124 wrong (check the bitmaps are sent correctly
54 first) */ 125 first) */
55 ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride, 126
56 &chunk_bo);
57
58 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); 127 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
59 chunk = ptr; 128 chunk = ptr;
60 chunk->data_size = height * chunk_stride; 129 chunk->data_size = height * chunk_stride;
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev,
102 while (remain > 0) { 171 while (remain > 0) {
103 page_base = out_offset & PAGE_MASK; 172 page_base = out_offset & PAGE_MASK;
104 page_offset = offset_in_page(out_offset); 173 page_offset = offset_in_page(out_offset);
105
106 size = min((int)(PAGE_SIZE - page_offset), remain); 174 size = min((int)(PAGE_SIZE - page_offset), remain);
107 175
108 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); 176 ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev,
116 } 184 }
117 } 185 }
118 } 186 }
119
120
121 qxl_bo_kunmap(chunk_bo); 187 qxl_bo_kunmap(chunk_bo);
122 188
123 /* Image */ 189 image_bo = dimage->bo;
124 ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo); 190 ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
125
126 ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
127 image = ptr; 191 image = ptr;
128 192
129 image->descriptor.id = 0; 193 image->descriptor.id = 0;
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev,
154 image->u.bitmap.stride = chunk_stride; 218 image->u.bitmap.stride = chunk_stride;
155 image->u.bitmap.palette = 0; 219 image->u.bitmap.palette = 0;
156 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); 220 image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
157 qxl_release_add_res(qdev, release, chunk_bo);
158 qxl_bo_unreserve(chunk_bo);
159 qxl_bo_unref(&chunk_bo);
160 221
161 qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr); 222 qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
162 223
163 return 0; 224 return 0;
164} 225}
165 226
166int qxl_image_create(struct qxl_device *qdev, 227int qxl_image_init(struct qxl_device *qdev,
167 struct qxl_release *release, 228 struct qxl_release *release,
168 struct qxl_bo **image_bo, 229 struct qxl_drm_image *dimage,
169 const uint8_t *data, 230 const uint8_t *data,
170 int x, int y, int width, int height, 231 int x, int y, int width, int height,
171 int depth, int stride) 232 int depth, int stride)
172{ 233{
173 data += y * stride + x * (depth / 8); 234 data += y * stride + x * (depth / 8);
174 return qxl_image_create_helper(qdev, release, image_bo, data, 235 return qxl_image_init_helper(qdev, release, dimage, data,
175 width, height, depth, 0, stride); 236 width, height, depth, 0, stride);
176} 237}
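
For orientation, a hedged sketch of how the reworked image API above is meant to be driven. qxl_image_alloc_objects() is not shown in this hunk; its name and signature are assumed from the init/free pair, and the submit step between init and free is elided.

static int example_image_round_trip(struct qxl_device *qdev,
                                    struct qxl_release *release,
                                    const uint8_t *data,
                                    int width, int height, int stride)
{
        struct qxl_drm_image *dimage;
        int ret;

        /* assumed counterpart to qxl_image_free_objects(): allocates
         * dimage->bo and a chunk bo big enough for height * stride */
        ret = qxl_image_alloc_objects(qdev, &dimage, height, stride);
        if (ret)
                return ret;

        /* no allocation happens here any more, so this is safe to call
         * while the release's bo list is reserved and validated */
        ret = qxl_image_init(qdev, release, dimage, data,
                             0, 0, width, height, 32, stride);

        /* ... push the command referencing the image, then drop the
         * local references */
        qxl_image_free_objects(qdev, dimage);
        return ret;
}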
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 27f45e49250d..6de33563d6f1 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
68 &qxl_map->offset); 68 &qxl_map->offset);
69} 69}
70 70
71struct qxl_reloc_info {
72 int type;
73 struct qxl_bo *dst_bo;
74 uint32_t dst_offset;
75 struct qxl_bo *src_bo;
76 int src_offset;
77};
78
71/* 79/*
 72 * dst must be validated, i.e. whole bo on vram/surface ram (right now all bo's 80 * dst must be validated, i.e. whole bo on vram/surface ram (right now all bo's
73 * are on vram). 81 * are on vram).
74 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) 82 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
75 */ 83 */
76static void 84static void
77apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, 85apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
78 struct qxl_bo *src, uint64_t src_off)
79{ 86{
80 void *reloc_page; 87 void *reloc_page;
81 88 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
82 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); 89 *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
83 *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, 90 info->src_bo,
84 src, src_off); 91 info->src_offset);
85 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); 92 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
86} 93}
87 94
88static void 95static void
89apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, 96apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
90 struct qxl_bo *src)
91{ 97{
92 uint32_t id = 0; 98 uint32_t id = 0;
93 void *reloc_page; 99 void *reloc_page;
94 100
95 if (src && !src->is_primary) 101 if (info->src_bo && !info->src_bo->is_primary)
96 id = src->surface_id; 102 id = info->src_bo->surface_id;
97 103
98 reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); 104 reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
99 *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; 105 *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
100 qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); 106 qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
101} 107}
102 108
103/* return holding the reference to this object */ 109/* return holding the reference to this object */
104static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, 110static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
105 struct drm_file *file_priv, uint64_t handle, 111 struct drm_file *file_priv, uint64_t handle,
106 struct qxl_reloc_list *reloc_list) 112 struct qxl_release *release)
107{ 113{
108 struct drm_gem_object *gobj; 114 struct drm_gem_object *gobj;
109 struct qxl_bo *qobj; 115 struct qxl_bo *qobj;
110 int ret; 116 int ret;
111 117
112 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); 118 gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
113 if (!gobj) { 119 if (!gobj)
114 DRM_ERROR("bad bo handle %lld\n", handle);
115 return NULL; 120 return NULL;
116 } 121
117 qobj = gem_to_qxl_bo(gobj); 122 qobj = gem_to_qxl_bo(gobj);
118 123
119 ret = qxl_bo_list_add(reloc_list, qobj); 124 ret = qxl_release_list_add(release, qobj);
120 if (ret) 125 if (ret)
121 return NULL; 126 return NULL;
122 127
@@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
129 * However, the command as passed from user space must *not* contain the initial 134 * However, the command as passed from user space must *not* contain the initial
130 * QXLReleaseInfo struct (first XXX bytes) 135 * QXLReleaseInfo struct (first XXX bytes)
131 */ 136 */
132static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, 137static int qxl_process_single_command(struct qxl_device *qdev,
133 struct drm_file *file_priv) 138 struct drm_qxl_command *cmd,
139 struct drm_file *file_priv)
134{ 140{
135 struct qxl_device *qdev = dev->dev_private; 141 struct qxl_reloc_info *reloc_info;
136 struct drm_qxl_execbuffer *execbuffer = data; 142 int release_type;
137 struct drm_qxl_command user_cmd; 143 struct qxl_release *release;
138 int cmd_num; 144 struct qxl_bo *cmd_bo;
139 struct qxl_bo *reloc_src_bo;
140 struct qxl_bo *reloc_dst_bo;
141 struct drm_qxl_reloc reloc;
142 void *fb_cmd; 145 void *fb_cmd;
143 int i, ret; 146 int i, j, ret, num_relocs;
144 struct qxl_reloc_list reloc_list;
145 int unwritten; 147 int unwritten;
146 uint32_t reloc_dst_offset;
147 INIT_LIST_HEAD(&reloc_list.bos);
148 148
149 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { 149 switch (cmd->type) {
150 struct qxl_release *release; 150 case QXL_CMD_DRAW:
151 struct qxl_bo *cmd_bo; 151 release_type = QXL_RELEASE_DRAWABLE;
152 int release_type; 152 break;
153 struct drm_qxl_command *commands = 153 case QXL_CMD_SURFACE:
154 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; 154 case QXL_CMD_CURSOR:
155 default:
156 DRM_DEBUG("Only draw commands in execbuffers\n");
157 return -EINVAL;
158 break;
159 }
155 160
156 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], 161 if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
157 sizeof(user_cmd))) 162 return -EINVAL;
158 return -EFAULT;
159 switch (user_cmd.type) {
160 case QXL_CMD_DRAW:
161 release_type = QXL_RELEASE_DRAWABLE;
162 break;
163 case QXL_CMD_SURFACE:
164 case QXL_CMD_CURSOR:
165 default:
166 DRM_DEBUG("Only draw commands in execbuffers\n");
167 return -EINVAL;
168 break;
169 }
170 163
 171 if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) 164 if (!access_ok(VERIFY_READ,
172 return -EINVAL; 165 (void *)(unsigned long)cmd->command,
166 cmd->command_size))
167 return -EFAULT;
173 168
174 if (!access_ok(VERIFY_READ, 169 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
175 (void *)(unsigned long)user_cmd.command, 170 if (!reloc_info)
176 user_cmd.command_size)) 171 return -ENOMEM;
177 return -EFAULT;
178 172
179 ret = qxl_alloc_release_reserved(qdev, 173 ret = qxl_alloc_release_reserved(qdev,
180 sizeof(union qxl_release_info) + 174 sizeof(union qxl_release_info) +
181 user_cmd.command_size, 175 cmd->command_size,
182 release_type, 176 release_type,
183 &release, 177 &release,
184 &cmd_bo); 178 &cmd_bo);
185 if (ret) 179 if (ret)
186 return ret; 180 goto out_free_reloc;
187 181
188 /* TODO copy slow path code from i915 */ 182 /* TODO copy slow path code from i915 */
189 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); 183 fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
190 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); 184 unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
191 185
192 { 186 {
193 struct qxl_drawable *draw = fb_cmd; 187 struct qxl_drawable *draw = fb_cmd;
188 draw->mm_time = qdev->rom->mm_clock;
189 }
194 190
195 draw->mm_time = qdev->rom->mm_clock; 191 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
196 } 192 if (unwritten) {
197 qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); 193 DRM_ERROR("got unwritten %d\n", unwritten);
198 if (unwritten) { 194 ret = -EFAULT;
199 DRM_ERROR("got unwritten %d\n", unwritten); 195 goto out_free_release;
200 qxl_release_unreserve(qdev, release); 196 }
201 qxl_release_free(qdev, release); 197
202 return -EFAULT; 198 /* fill out reloc info structs */
199 num_relocs = 0;
200 for (i = 0; i < cmd->relocs_num; ++i) {
201 struct drm_qxl_reloc reloc;
202
203 if (DRM_COPY_FROM_USER(&reloc,
204 &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
205 sizeof(reloc))) {
206 ret = -EFAULT;
207 goto out_free_bos;
203 } 208 }
204 209
205 for (i = 0 ; i < user_cmd.relocs_num; ++i) { 210 /* add the bos to the list of bos to validate -
206 if (DRM_COPY_FROM_USER(&reloc, 211 need to validate first then process relocs? */
207 &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], 212 if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
 208 sizeof(reloc))) { 213 DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type);
209 qxl_bo_list_unreserve(&reloc_list, true);
210 qxl_release_unreserve(qdev, release);
211 qxl_release_free(qdev, release);
212 return -EFAULT;
213 }
214 214
215 /* add the bos to the list of bos to validate - 215 ret = -EINVAL;
216 need to validate first then process relocs? */ 216 goto out_free_bos;
217 if (reloc.dst_handle) { 217 }
218 reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, 218 reloc_info[i].type = reloc.reloc_type;
219 reloc.dst_handle, &reloc_list); 219
220 if (!reloc_dst_bo) { 220 if (reloc.dst_handle) {
221 qxl_bo_list_unreserve(&reloc_list, true); 221 reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
222 qxl_release_unreserve(qdev, release); 222 reloc.dst_handle, release);
223 qxl_release_free(qdev, release); 223 if (!reloc_info[i].dst_bo) {
224 return -EINVAL; 224 ret = -EINVAL;
225 } 225 reloc_info[i].src_bo = NULL;
226 reloc_dst_offset = 0; 226 goto out_free_bos;
227 } else {
228 reloc_dst_bo = cmd_bo;
229 reloc_dst_offset = release->release_offset;
230 } 227 }
231 228 reloc_info[i].dst_offset = reloc.dst_offset;
232 /* reserve and validate the reloc dst bo */ 229 } else {
233 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { 230 reloc_info[i].dst_bo = cmd_bo;
234 reloc_src_bo = 231 reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
235 qxlhw_handle_to_bo(qdev, file_priv, 232 }
236 reloc.src_handle, &reloc_list); 233 num_relocs++;
237 if (!reloc_src_bo) { 234
238 if (reloc_dst_bo != cmd_bo) 235 /* reserve and validate the reloc dst bo */
239 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); 236 if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
240 qxl_bo_list_unreserve(&reloc_list, true); 237 reloc_info[i].src_bo =
241 qxl_release_unreserve(qdev, release); 238 qxlhw_handle_to_bo(qdev, file_priv,
242 qxl_release_free(qdev, release); 239 reloc.src_handle, release);
243 return -EINVAL; 240 if (!reloc_info[i].src_bo) {
244 } 241 if (reloc_info[i].dst_bo != cmd_bo)
245 } else 242 drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
246 reloc_src_bo = NULL; 243 ret = -EINVAL;
247 if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { 244 goto out_free_bos;
248 apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
249 reloc_src_bo, reloc.src_offset);
250 } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
251 apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
252 } else {
253 DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
254 return -EINVAL;
255 } 245 }
246 reloc_info[i].src_offset = reloc.src_offset;
247 } else {
248 reloc_info[i].src_bo = NULL;
249 reloc_info[i].src_offset = 0;
250 }
251 }
256 252
257 if (reloc_src_bo && reloc_src_bo != cmd_bo) { 253 /* validate all buffers */
258 qxl_release_add_res(qdev, release, reloc_src_bo); 254 ret = qxl_release_reserve_list(release, false);
259 drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); 255 if (ret)
260 } 256 goto out_free_bos;
261 257
262 if (reloc_dst_bo != cmd_bo) 258 for (i = 0; i < cmd->relocs_num; ++i) {
263 drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); 259 if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
264 } 260 apply_reloc(qdev, &reloc_info[i]);
265 qxl_fence_releaseable(qdev, release); 261 else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
262 apply_surf_reloc(qdev, &reloc_info[i]);
263 }
266 264
267 ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); 265 ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
268 if (ret == -ERESTARTSYS) { 266 if (ret)
269 qxl_release_unreserve(qdev, release); 267 qxl_release_backoff_reserve_list(release);
270 qxl_release_free(qdev, release); 268 else
271 qxl_bo_list_unreserve(&reloc_list, true); 269 qxl_release_fence_buffer_objects(release);
270
271out_free_bos:
272 for (j = 0; j < num_relocs; j++) {
273 if (reloc_info[j].dst_bo != cmd_bo)
274 drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
275 if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
276 drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
277 }
278out_free_release:
279 if (ret)
280 qxl_release_free(qdev, release);
281out_free_reloc:
282 kfree(reloc_info);
283 return ret;
284}
285
286static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
287 struct drm_file *file_priv)
288{
289 struct qxl_device *qdev = dev->dev_private;
290 struct drm_qxl_execbuffer *execbuffer = data;
291 struct drm_qxl_command user_cmd;
292 int cmd_num;
293 int ret;
294
295 for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
296
297 struct drm_qxl_command *commands =
298 (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
299
300 if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
301 sizeof(user_cmd)))
302 return -EFAULT;
303
304 ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
305 if (ret)
272 return ret; 306 return ret;
273 }
274 qxl_release_unreserve(qdev, release);
275 } 307 }
276 qxl_bo_list_unreserve(&reloc_list, 0);
277 return 0; 308 return 0;
278} 309}
279 310
@@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
305 goto out; 336 goto out;
306 337
307 if (!qobj->pin_count) { 338 if (!qobj->pin_count) {
308 qxl_ttm_placement_from_domain(qobj, qobj->type); 339 qxl_ttm_placement_from_domain(qobj, qobj->type, false);
309 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, 340 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
310 true, false); 341 true, false);
311 if (unlikely(ret)) 342 if (unlikely(ret))
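
Taken together, qxl_process_single_command() above enforces a strict ordering; a descriptive outline drawn from the code in this hunk:

/*
 * Per-command flow after the rework:
 *
 *   qxl_alloc_release_reserved()         allocate release + command bo
 *   copy the command from user space, stamp mm_time
 *   qxlhw_handle_to_bo()                 look up each reloc bo and add it
 *                                        to release->bos via
 *                                        qxl_release_list_add()
 *   qxl_release_reserve_list()           reserve + validate the whole
 *                                        list in one pass
 *   apply_reloc()/apply_surf_reloc()     patch addresses with bos resident
 *   qxl_push_command_ring_release()      submit to the device ring
 *   qxl_release_fence_buffer_objects()   fence + unreserve on success
 *   qxl_release_backoff_reserve_list()   back off reservations on failure
 */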
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 1191fe7788c9..aa161cddd87e 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
51 return false; 51 return false;
52} 52}
53 53
54void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) 54void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
55{ 55{
56 u32 c = 0; 56 u32 c = 0;
57 u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
57 58
58 qbo->placement.fpfn = 0; 59 qbo->placement.fpfn = 0;
59 qbo->placement.lpfn = 0; 60 qbo->placement.lpfn = 0;
60 qbo->placement.placement = qbo->placements; 61 qbo->placement.placement = qbo->placements;
61 qbo->placement.busy_placement = qbo->placements; 62 qbo->placement.busy_placement = qbo->placements;
62 if (domain == QXL_GEM_DOMAIN_VRAM) 63 if (domain == QXL_GEM_DOMAIN_VRAM)
63 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; 64 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
64 if (domain == QXL_GEM_DOMAIN_SURFACE) 65 if (domain == QXL_GEM_DOMAIN_SURFACE)
65 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; 66 qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
66 if (domain == QXL_GEM_DOMAIN_CPU) 67 if (domain == QXL_GEM_DOMAIN_CPU)
67 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 68 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
68 if (!c) 69 if (!c)
69 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 70 qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
70 qbo->placement.num_placement = c; 71 qbo->placement.num_placement = c;
@@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
73 74
74 75
75int qxl_bo_create(struct qxl_device *qdev, 76int qxl_bo_create(struct qxl_device *qdev,
76 unsigned long size, bool kernel, u32 domain, 77 unsigned long size, bool kernel, bool pinned, u32 domain,
77 struct qxl_surface *surf, 78 struct qxl_surface *surf,
78 struct qxl_bo **bo_ptr) 79 struct qxl_bo **bo_ptr)
79{ 80{
@@ -99,15 +100,15 @@ int qxl_bo_create(struct qxl_device *qdev,
99 } 100 }
100 bo->gem_base.driver_private = NULL; 101 bo->gem_base.driver_private = NULL;
101 bo->type = domain; 102 bo->type = domain;
102 bo->pin_count = 0; 103 bo->pin_count = pinned ? 1 : 0;
103 bo->surface_id = 0; 104 bo->surface_id = 0;
104 qxl_fence_init(qdev, &bo->fence); 105 qxl_fence_init(qdev, &bo->fence);
105 INIT_LIST_HEAD(&bo->list); 106 INIT_LIST_HEAD(&bo->list);
106 atomic_set(&bo->reserve_count, 0); 107
107 if (surf) 108 if (surf)
108 bo->surf = *surf; 109 bo->surf = *surf;
109 110
110 qxl_ttm_placement_from_domain(bo, domain); 111 qxl_ttm_placement_from_domain(bo, domain, pinned);
111 112
112 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, 113 r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
113 &bo->placement, 0, !kernel, NULL, size, 114 &bo->placement, 0, !kernel, NULL, size,
@@ -228,7 +229,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
228int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) 229int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
229{ 230{
230 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; 231 struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
231 int r, i; 232 int r;
232 233
233 if (bo->pin_count) { 234 if (bo->pin_count) {
234 bo->pin_count++; 235 bo->pin_count++;
@@ -236,9 +237,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
236 *gpu_addr = qxl_bo_gpu_offset(bo); 237 *gpu_addr = qxl_bo_gpu_offset(bo);
237 return 0; 238 return 0;
238 } 239 }
239 qxl_ttm_placement_from_domain(bo, domain); 240 qxl_ttm_placement_from_domain(bo, domain, true);
240 for (i = 0; i < bo->placement.num_placement; i++)
241 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
242 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); 241 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
243 if (likely(r == 0)) { 242 if (likely(r == 0)) {
244 bo->pin_count = 1; 243 bo->pin_count = 1;
@@ -317,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
317 return 0; 316 return 0;
318} 317}
319 318
320void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
321{
322 struct qxl_bo_list *entry, *sf;
323
324 list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
325 qxl_bo_unreserve(entry->bo);
326 list_del(&entry->lhead);
327 kfree(entry);
328 }
329}
330
331int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
332{
333 struct qxl_bo_list *entry;
334 int ret;
335
336 list_for_each_entry(entry, &reloc_list->bos, lhead) {
337 if (entry->bo == bo)
338 return 0;
339 }
340
341 entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
342 if (!entry)
343 return -ENOMEM;
344
345 entry->bo = bo;
346 list_add(&entry->lhead, &reloc_list->bos);
347
348 ret = qxl_bo_reserve(bo, false);
349 if (ret)
350 return ret;
351
352 if (!bo->pin_count) {
353 qxl_ttm_placement_from_domain(bo, bo->type);
354 ret = ttm_bo_validate(&bo->tbo, &bo->placement,
355 true, false);
356 if (ret)
357 return ret;
358 }
359
360 /* allocate a surface for reserved + validated buffers */
361 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
362 if (ret)
363 return ret;
364 return 0;
365}
366
367int qxl_surf_evict(struct qxl_device *qdev) 319int qxl_surf_evict(struct qxl_device *qdev)
368{ 320{
369 return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); 321 return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
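
With the new pinned argument, TTM_PL_FLAG_NO_EVICT is folded into the placement flags when the bo is created, instead of being OR-ed in afterwards as the removed loop in qxl_bo_pin() used to do. A minimal sketch of the updated call, using the signature from this hunk:

static int example_create_pinned_bo(struct qxl_device *qdev,
                                    struct qxl_bo **bo)
{
        /* placement gets TTM_PL_FLAG_NO_EVICT up front and pin_count
         * starts at 1; no post-hoc flag fix-up is needed */
        return qxl_bo_create(qdev, PAGE_SIZE, false /* kernel */,
                             true /* pinned */, QXL_GEM_DOMAIN_VRAM,
                             NULL, bo);
}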
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ee7ad79ce781..8cb6167038e5 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
88 88
89extern int qxl_bo_create(struct qxl_device *qdev, 89extern int qxl_bo_create(struct qxl_device *qdev,
90 unsigned long size, 90 unsigned long size,
91 bool kernel, u32 domain, 91 bool kernel, bool pinned, u32 domain,
92 struct qxl_surface *surf, 92 struct qxl_surface *surf,
93 struct qxl_bo **bo_ptr); 93 struct qxl_bo **bo_ptr);
94extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); 94extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
@@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
99extern void qxl_bo_unref(struct qxl_bo **bo); 99extern void qxl_bo_unref(struct qxl_bo **bo);
100extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); 100extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
101extern int qxl_bo_unpin(struct qxl_bo *bo); 101extern int qxl_bo_unpin(struct qxl_bo *bo);
102extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); 102extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
103extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); 103extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
104 104
105extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
106extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
107#endif 105#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b443d6751d5f..b61449e52cd5 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -38,7 +38,8 @@
38 38
39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; 39static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; 40static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
41uint64_t 41
42static uint64_t
42qxl_release_alloc(struct qxl_device *qdev, int type, 43qxl_release_alloc(struct qxl_device *qdev, int type,
43 struct qxl_release **ret) 44 struct qxl_release **ret)
44{ 45{
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
53 return 0; 54 return 0;
54 } 55 }
55 release->type = type; 56 release->type = type;
56 release->bo_count = 0;
57 release->release_offset = 0; 57 release->release_offset = 0;
58 release->surface_release_id = 0; 58 release->surface_release_id = 0;
59 INIT_LIST_HEAD(&release->bos);
59 60
60 idr_preload(GFP_KERNEL); 61 idr_preload(GFP_KERNEL);
61 spin_lock(&qdev->release_idr_lock); 62 spin_lock(&qdev->release_idr_lock);
@@ -77,20 +78,20 @@ void
77qxl_release_free(struct qxl_device *qdev, 78qxl_release_free(struct qxl_device *qdev,
78 struct qxl_release *release) 79 struct qxl_release *release)
79{ 80{
80 int i; 81 struct qxl_bo_list *entry, *tmp;
81 82 QXL_INFO(qdev, "release %d, type %d\n", release->id,
82 QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, 83 release->type);
83 release->type, release->bo_count);
84 84
85 if (release->surface_release_id) 85 if (release->surface_release_id)
86 qxl_surface_id_dealloc(qdev, release->surface_release_id); 86 qxl_surface_id_dealloc(qdev, release->surface_release_id);
87 87
88 for (i = 0 ; i < release->bo_count; ++i) { 88 list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
89 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
89 QXL_INFO(qdev, "release %llx\n", 90 QXL_INFO(qdev, "release %llx\n",
90 release->bos[i]->tbo.addr_space_offset 91 entry->tv.bo->addr_space_offset
91 - DRM_FILE_OFFSET); 92 - DRM_FILE_OFFSET);
92 qxl_fence_remove_release(&release->bos[i]->fence, release->id); 93 qxl_fence_remove_release(&bo->fence, release->id);
93 qxl_bo_unref(&release->bos[i]); 94 qxl_bo_unref(&bo);
94 } 95 }
95 spin_lock(&qdev->release_idr_lock); 96 spin_lock(&qdev->release_idr_lock);
96 idr_remove(&qdev->release_idr, release->id); 97 idr_remove(&qdev->release_idr, release->id);
@@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev,
98 kfree(release); 99 kfree(release);
99} 100}
100 101
101void
102qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
103 struct qxl_bo *bo)
104{
105 int i;
106 for (i = 0; i < release->bo_count; i++)
107 if (release->bos[i] == bo)
108 return;
109
110 if (release->bo_count >= QXL_MAX_RES) {
111 DRM_ERROR("exceeded max resource on a qxl_release item\n");
112 return;
113 }
114 release->bos[release->bo_count++] = qxl_bo_ref(bo);
115}
116
117static int qxl_release_bo_alloc(struct qxl_device *qdev, 102static int qxl_release_bo_alloc(struct qxl_device *qdev,
118 struct qxl_bo **bo) 103 struct qxl_bo **bo)
119{ 104{
120 int ret; 105 int ret;
 121 ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, 106 /* pin release bo's; they are too messy to evict */
107 ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
108 QXL_GEM_DOMAIN_VRAM, NULL,
122 bo); 109 bo);
123 return ret; 110 return ret;
124} 111}
125 112
126int qxl_release_reserve(struct qxl_device *qdev, 113int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
127 struct qxl_release *release, bool no_wait) 114{
115 struct qxl_bo_list *entry;
116
117 list_for_each_entry(entry, &release->bos, tv.head) {
118 if (entry->tv.bo == &bo->tbo)
119 return 0;
120 }
121
122 entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
123 if (!entry)
124 return -ENOMEM;
125
126 qxl_bo_ref(bo);
127 entry->tv.bo = &bo->tbo;
128 list_add_tail(&entry->tv.head, &release->bos);
129 return 0;
130}
131
132static int qxl_release_validate_bo(struct qxl_bo *bo)
128{ 133{
129 int ret; 134 int ret;
130 if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { 135
131 ret = qxl_bo_reserve(release->bos[0], no_wait); 136 if (!bo->pin_count) {
137 qxl_ttm_placement_from_domain(bo, bo->type, false);
138 ret = ttm_bo_validate(&bo->tbo, &bo->placement,
139 true, false);
132 if (ret) 140 if (ret)
133 return ret; 141 return ret;
134 } 142 }
143
144 /* allocate a surface for reserved + validated buffers */
145 ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
146 if (ret)
147 return ret;
148 return 0;
149}
150
151int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
152{
153 int ret;
154 struct qxl_bo_list *entry;
155
 156 /* if only one object is on the release it's the release itself;
 157 since these objects are pinned there is no need to reserve */
158 if (list_is_singular(&release->bos))
159 return 0;
160
161 ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
162 if (ret)
163 return ret;
164
165 list_for_each_entry(entry, &release->bos, tv.head) {
166 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
167
168 ret = qxl_release_validate_bo(bo);
169 if (ret) {
170 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
171 return ret;
172 }
173 }
135 return 0; 174 return 0;
136} 175}
137 176
138void qxl_release_unreserve(struct qxl_device *qdev, 177void qxl_release_backoff_reserve_list(struct qxl_release *release)
139 struct qxl_release *release)
140{ 178{
 141 if (atomic_dec_and_test(&release->bos[0]->reserve_count)) 179 /* if only one object is on the release it's the release itself;
 142 qxl_bo_unreserve(release->bos[0]); 180 since these objects are pinned there is no need to reserve */
181 if (list_is_singular(&release->bos))
182 return;
183
184 ttm_eu_backoff_reservation(&release->ticket, &release->bos);
143} 185}
144 186
187
145int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, 188int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
146 enum qxl_surface_cmd_type surface_cmd_type, 189 enum qxl_surface_cmd_type surface_cmd_type,
147 struct qxl_release *create_rel, 190 struct qxl_release *create_rel,
148 struct qxl_release **release) 191 struct qxl_release **release)
149{ 192{
150 int ret;
151
152 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { 193 if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
153 int idr_ret; 194 int idr_ret;
195 struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
154 struct qxl_bo *bo; 196 struct qxl_bo *bo;
155 union qxl_release_info *info; 197 union qxl_release_info *info;
156 198
157 /* stash the release after the create command */ 199 /* stash the release after the create command */
158 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); 200 idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
159 bo = qxl_bo_ref(create_rel->bos[0]); 201 bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
160 202
161 (*release)->release_offset = create_rel->release_offset + 64; 203 (*release)->release_offset = create_rel->release_offset + 64;
162 204
163 qxl_release_add_res(qdev, *release, bo); 205 qxl_release_list_add(*release, bo);
164 206
165 ret = qxl_release_reserve(qdev, *release, false);
166 if (ret) {
167 DRM_ERROR("release reserve failed\n");
168 goto out_unref;
169 }
170 info = qxl_release_map(qdev, *release); 207 info = qxl_release_map(qdev, *release);
171 info->id = idr_ret; 208 info->id = idr_ret;
172 qxl_release_unmap(qdev, *release, info); 209 qxl_release_unmap(qdev, *release, info);
173 210
174
175out_unref:
176 qxl_bo_unref(&bo); 211 qxl_bo_unref(&bo);
177 return ret; 212 return 0;
178 } 213 }
179 214
180 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), 215 return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
@@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
187{ 222{
188 struct qxl_bo *bo; 223 struct qxl_bo *bo;
189 int idr_ret; 224 int idr_ret;
190 int ret; 225 int ret = 0;
191 union qxl_release_info *info; 226 union qxl_release_info *info;
192 int cur_idx; 227 int cur_idx;
193 228
@@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
216 mutex_unlock(&qdev->release_mutex); 251 mutex_unlock(&qdev->release_mutex);
217 return ret; 252 return ret;
218 } 253 }
219
220 /* pin releases bo's they are too messy to evict */
221 ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
222 qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
223 qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
224 } 254 }
225 255
226 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); 256 bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
@@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
231 if (rbo) 261 if (rbo)
232 *rbo = bo; 262 *rbo = bo;
233 263
234 qxl_release_add_res(qdev, *release, bo);
235
236 ret = qxl_release_reserve(qdev, *release, false);
237 mutex_unlock(&qdev->release_mutex); 264 mutex_unlock(&qdev->release_mutex);
238 if (ret) 265
239 goto out_unref; 266 qxl_release_list_add(*release, bo);
240 267
241 info = qxl_release_map(qdev, *release); 268 info = qxl_release_map(qdev, *release);
242 info->id = idr_ret; 269 info->id = idr_ret;
243 qxl_release_unmap(qdev, *release, info); 270 qxl_release_unmap(qdev, *release, info);
244 271
245out_unref:
246 qxl_bo_unref(&bo); 272 qxl_bo_unref(&bo);
247 return ret; 273 return ret;
248} 274}
249 275
250int qxl_fence_releaseable(struct qxl_device *qdev,
251 struct qxl_release *release)
252{
253 int i, ret;
254 for (i = 0; i < release->bo_count; i++) {
255 if (!release->bos[i]->tbo.sync_obj)
256 release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
257 ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
258 if (ret)
259 return ret;
260 }
261 return 0;
262}
263
264struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, 276struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
265 uint64_t id) 277 uint64_t id)
266{ 278{
@@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
273 DRM_ERROR("failed to find id in release_idr\n"); 285 DRM_ERROR("failed to find id in release_idr\n");
274 return NULL; 286 return NULL;
275 } 287 }
276 if (release->bo_count < 1) { 288
277 DRM_ERROR("read a released resource with 0 bos\n");
278 return NULL;
279 }
280 return release; 289 return release;
281} 290}
282 291
@@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
285{ 294{
286 void *ptr; 295 void *ptr;
287 union qxl_release_info *info; 296 union qxl_release_info *info;
288 struct qxl_bo *bo = release->bos[0]; 297 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
298 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
289 299
290 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); 300 ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
301 if (!ptr)
302 return NULL;
291 info = ptr + (release->release_offset & ~PAGE_SIZE); 303 info = ptr + (release->release_offset & ~PAGE_SIZE);
292 return info; 304 return info;
293} 305}
@@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev,
296 struct qxl_release *release, 308 struct qxl_release *release,
297 union qxl_release_info *info) 309 union qxl_release_info *info)
298{ 310{
299 struct qxl_bo *bo = release->bos[0]; 311 struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
312 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
300 void *ptr; 313 void *ptr;
301 314
302 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); 315 ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
303 qxl_bo_kunmap_atomic_page(qdev, bo, ptr); 316 qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
304} 317}
318
319void qxl_release_fence_buffer_objects(struct qxl_release *release)
320{
321 struct ttm_validate_buffer *entry;
322 struct ttm_buffer_object *bo;
323 struct ttm_bo_global *glob;
324 struct ttm_bo_device *bdev;
325 struct ttm_bo_driver *driver;
326 struct qxl_bo *qbo;
327
 328 /* if only one object is on the release it's the release itself;
 329 since these objects are pinned there is no need to reserve */
330 if (list_is_singular(&release->bos))
331 return;
332
333 bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
334 bdev = bo->bdev;
335 driver = bdev->driver;
336 glob = bo->glob;
337
338 spin_lock(&glob->lru_lock);
339 spin_lock(&bdev->fence_lock);
340
341 list_for_each_entry(entry, &release->bos, head) {
342 bo = entry->bo;
343 qbo = to_qxl_bo(bo);
344
345 if (!entry->bo->sync_obj)
346 entry->bo->sync_obj = &qbo->fence;
347
348 qxl_fence_add_release_locked(&qbo->fence, release->id);
349
350 ttm_bo_add_to_lru(bo);
351 ww_mutex_unlock(&bo->resv->lock);
352 entry->reserved = false;
353 }
354 spin_unlock(&bdev->fence_lock);
355 spin_unlock(&glob->lru_lock);
356 ww_acquire_fini(&release->ticket);
357}
358
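
The release-tracking rework hinges on the list entry embedding a ttm_validate_buffer, so ttm_eu_reserve_buffers() can walk release->bos directly and to_qxl_bo(entry->tv.bo) recovers the driver object. The type itself lives in qxl_drv.h, outside this hunk; its assumed shape:

/* assumed definition, from qxl_drv.h elsewhere in this series */
struct qxl_bo_list {
        struct ttm_validate_buffer tv;  /* tv.bo and tv.head are used above */
};

Reserving via ttm_eu_reserve_buffers(&release->ticket, &release->bos) then holds every bo's ww_mutex on behalf of the release until qxl_release_fence_buffer_objects() or the backoff path gives them up.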
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 489cb8cece4d..1dfd84cda2a1 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -206,7 +206,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
206 return; 206 return;
207 } 207 }
208 qbo = container_of(bo, struct qxl_bo, tbo); 208 qbo = container_of(bo, struct qxl_bo, tbo);
209 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); 209 qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
210 *placement = qbo->placement; 210 *placement = qbo->placement;
211} 211}
212 212