diff options
25 files changed, 7260 insertions, 0 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 1e82882da9de..19b8e0d5d910 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig | |||
@@ -220,3 +220,5 @@ source "drivers/gpu/drm/tegra/Kconfig" | |||
220 | source "drivers/gpu/drm/omapdrm/Kconfig" | 220 | source "drivers/gpu/drm/omapdrm/Kconfig" |
221 | 221 | ||
222 | source "drivers/gpu/drm/tilcdc/Kconfig" | 222 | source "drivers/gpu/drm/tilcdc/Kconfig" |
223 | |||
224 | source "drivers/gpu/drm/qxl/Kconfig" | ||
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 0d59b24f8d23..6a4211521011 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile | |||
@@ -52,4 +52,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ | |||
52 | obj-$(CONFIG_DRM_TEGRA) += tegra/ | 52 | obj-$(CONFIG_DRM_TEGRA) += tegra/ |
53 | obj-$(CONFIG_DRM_OMAP) += omapdrm/ | 53 | obj-$(CONFIG_DRM_OMAP) += omapdrm/ |
54 | obj-$(CONFIG_DRM_TILCDC) += tilcdc/ | 54 | obj-$(CONFIG_DRM_TILCDC) += tilcdc/ |
55 | obj-$(CONFIG_DRM_QXL) += qxl/ | ||
55 | obj-y += i2c/ | 56 | obj-y += i2c/ |
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig new file mode 100644 index 000000000000..2f1a57e11140 --- /dev/null +++ b/drivers/gpu/drm/qxl/Kconfig | |||
@@ -0,0 +1,10 @@ | |||
1 | config DRM_QXL | ||
2 | tristate "QXL virtual GPU" | ||
3 | depends on DRM && PCI | ||
4 | select FB_SYS_FILLRECT | ||
5 | select FB_SYS_COPYAREA | ||
6 | select FB_SYS_IMAGEBLIT | ||
7 | select DRM_KMS_HELPER | ||
8 | select DRM_TTM | ||
9 | help | ||
10 | QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting. | ||
diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile new file mode 100644 index 000000000000..ea046ba691d2 --- /dev/null +++ b/drivers/gpu/drm/qxl/Makefile | |||
@@ -0,0 +1,9 @@ | |||
1 | # | ||
2 | # Makefile for the drm device driver. This driver provides support for the | ||
3 | # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. | ||
4 | |||
5 | ccflags-y := -Iinclude/drm | ||
6 | |||
7 | qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o | ||
8 | |||
9 | obj-$(CONFIG_DRM_QXL)+= qxl.o | ||
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c new file mode 100644 index 000000000000..804b411a60ca --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_cmd.c | |||
@@ -0,0 +1,707 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | /* QXL cmd/ring handling */ | ||
27 | |||
28 | #include "qxl_drv.h" | ||
29 | #include "qxl_object.h" | ||
30 | |||
31 | static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap); | ||
32 | |||
33 | struct ring { | ||
34 | struct qxl_ring_header header; | ||
35 | uint8_t elements[0]; | ||
36 | }; | ||
37 | |||
38 | struct qxl_ring { | ||
39 | struct ring *ring; | ||
40 | int element_size; | ||
41 | int n_elements; | ||
42 | int prod_notify; | ||
43 | wait_queue_head_t *push_event; | ||
44 | spinlock_t lock; | ||
45 | }; | ||
46 | |||
/* Free the ring bookkeeping only; the mapped device ring memory it
 * points at (handed in via qxl_ring_create) is not owned by us. */
void qxl_ring_free(struct qxl_ring *ring)
{
	kfree(ring);
}
51 | |||
52 | struct qxl_ring * | ||
53 | qxl_ring_create(struct qxl_ring_header *header, | ||
54 | int element_size, | ||
55 | int n_elements, | ||
56 | int prod_notify, | ||
57 | bool set_prod_notify, | ||
58 | wait_queue_head_t *push_event) | ||
59 | { | ||
60 | struct qxl_ring *ring; | ||
61 | |||
62 | ring = kmalloc(sizeof(*ring), GFP_KERNEL); | ||
63 | if (!ring) | ||
64 | return NULL; | ||
65 | |||
66 | ring->ring = (struct ring *)header; | ||
67 | ring->element_size = element_size; | ||
68 | ring->n_elements = n_elements; | ||
69 | ring->prod_notify = prod_notify; | ||
70 | ring->push_event = push_event; | ||
71 | if (set_prod_notify) | ||
72 | header->notify_on_prod = ring->n_elements; | ||
73 | spin_lock_init(&ring->lock); | ||
74 | return ring; | ||
75 | } | ||
76 | |||
77 | static int qxl_check_header(struct qxl_ring *ring) | ||
78 | { | ||
79 | int ret; | ||
80 | struct qxl_ring_header *header = &(ring->ring->header); | ||
81 | unsigned long flags; | ||
82 | spin_lock_irqsave(&ring->lock, flags); | ||
83 | ret = header->prod - header->cons < header->num_items; | ||
84 | if (ret == 0) | ||
85 | header->notify_on_cons = header->cons + 1; | ||
86 | spin_unlock_irqrestore(&ring->lock, flags); | ||
87 | return ret; | ||
88 | } | ||
89 | |||
90 | static int qxl_check_idle(struct qxl_ring *ring) | ||
91 | { | ||
92 | int ret; | ||
93 | struct qxl_ring_header *header = &(ring->ring->header); | ||
94 | unsigned long flags; | ||
95 | spin_lock_irqsave(&ring->lock, flags); | ||
96 | ret = header->prod == header->cons; | ||
97 | spin_unlock_irqrestore(&ring->lock, flags); | ||
98 | return ret; | ||
99 | } | ||
100 | |||
/*
 * Push one element onto the ring, blocking while the ring is full.
 *
 * When full we arm notify_on_cons so the device interrupts us as it
 * consumes an entry, drop the lock, and either busy-wait (when sleeping
 * is not allowed) or sleep on ring->push_event.  Returns 0 on success,
 * or -ERESTARTSYS if an interruptible wait was broken by a signal (in
 * which case nothing was pushed).
 */
int qxl_ring_push(struct qxl_ring *ring,
	   const void *new_elt, bool interruptible)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	uint8_t *elt;
	int idx, ret;
	unsigned long flags;
	spin_lock_irqsave(&ring->lock, flags);
	if (header->prod - header->cons == header->num_items) {
		/* ring full: request a consumption notification, then
		 * wait outside the lock */
		header->notify_on_cons = header->cons + 1;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		if (!drm_can_sleep()) {
			while (!qxl_check_header(ring))
				udelay(1);
		} else {
			if (interruptible) {
				ret = wait_event_interruptible(*ring->push_event,
							       qxl_check_header(ring));
				if (ret)
					return ret;
			} else {
				wait_event(*ring->push_event,
					   qxl_check_header(ring));
			}

		}
		spin_lock_irqsave(&ring->lock, flags);
	}

	/* assumes n_elements is a power of two, so the mask wraps the
	 * producer index - TODO confirm for all rings */
	idx = header->prod & (ring->n_elements - 1);
	elt = ring->ring->elements + idx * ring->element_size;

	memcpy((void *)elt, new_elt, ring->element_size);

	header->prod++;

	mb();	/* element contents must be visible before the prod bump */

	/* kick the device only when it asked to be notified here */
	if (header->prod == header->notify_on_prod)
		outb(0, ring->prod_notify);

	spin_unlock_irqrestore(&ring->lock, flags);
	return 0;
}
146 | |||
147 | bool qxl_ring_pop(struct qxl_ring *ring, | ||
148 | void *element) | ||
149 | { | ||
150 | volatile struct qxl_ring_header *header = &(ring->ring->header); | ||
151 | volatile uint8_t *ring_elt; | ||
152 | int idx; | ||
153 | unsigned long flags; | ||
154 | spin_lock_irqsave(&ring->lock, flags); | ||
155 | if (header->cons == header->prod) { | ||
156 | header->notify_on_prod = header->cons + 1; | ||
157 | spin_unlock_irqrestore(&ring->lock, flags); | ||
158 | return false; | ||
159 | } | ||
160 | |||
161 | idx = header->cons & (ring->n_elements - 1); | ||
162 | ring_elt = ring->ring->elements + idx * ring->element_size; | ||
163 | |||
164 | memcpy(element, (void *)ring_elt, ring->element_size); | ||
165 | |||
166 | header->cons++; | ||
167 | |||
168 | spin_unlock_irqrestore(&ring->lock, flags); | ||
169 | return true; | ||
170 | } | ||
171 | |||
/*
 * Block until the device has consumed everything in @ring.  Arms
 * notify_on_cons at the current producer position so the consumer
 * interrupt wakes us, then sleeps on push_event until prod == cons.
 */
void qxl_ring_wait_idle(struct qxl_ring *ring)
{
	struct qxl_ring_header *header = &(ring->ring->header);
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->ring->header.cons < ring->ring->header.prod) {
		header->notify_on_cons = header->prod;
		mb();
		spin_unlock_irqrestore(&ring->lock, flags);
		/* NOTE(review): the interruptible wait's return value is
		 * ignored, so a signal ends the wait before the ring is
		 * idle - confirm callers tolerate that. */
		wait_event_interruptible(*ring->push_event,
					 qxl_check_idle(ring));
		spin_lock_irqsave(&ring->lock, flags);
	}
	spin_unlock_irqrestore(&ring->lock, flags);
}
188 | |||
189 | int | ||
190 | qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, | ||
191 | uint32_t type, bool interruptible) | ||
192 | { | ||
193 | struct qxl_command cmd; | ||
194 | |||
195 | cmd.type = type; | ||
196 | cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); | ||
197 | |||
198 | return qxl_ring_push(qdev->command_ring, &cmd, interruptible); | ||
199 | } | ||
200 | |||
201 | int | ||
202 | qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, | ||
203 | uint32_t type, bool interruptible) | ||
204 | { | ||
205 | struct qxl_command cmd; | ||
206 | |||
207 | cmd.type = type; | ||
208 | cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); | ||
209 | |||
210 | return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); | ||
211 | } | ||
212 | |||
213 | bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush) | ||
214 | { | ||
215 | if (!qxl_check_idle(qdev->release_ring)) { | ||
216 | queue_work(qdev->gc_queue, &qdev->gc_work); | ||
217 | if (flush) | ||
218 | flush_work(&qdev->gc_work); | ||
219 | return true; | ||
220 | } | ||
221 | return false; | ||
222 | } | ||
223 | |||
224 | int qxl_garbage_collect(struct qxl_device *qdev) | ||
225 | { | ||
226 | struct qxl_release *release; | ||
227 | uint64_t id, next_id; | ||
228 | int i = 0; | ||
229 | int ret; | ||
230 | union qxl_release_info *info; | ||
231 | |||
232 | while (qxl_ring_pop(qdev->release_ring, &id)) { | ||
233 | QXL_INFO(qdev, "popped %lld\n", id); | ||
234 | while (id) { | ||
235 | release = qxl_release_from_id_locked(qdev, id); | ||
236 | if (release == NULL) | ||
237 | break; | ||
238 | |||
239 | ret = qxl_release_reserve(qdev, release, false); | ||
240 | if (ret) { | ||
241 | qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id); | ||
242 | DRM_ERROR("failed to reserve release %lld\n", id); | ||
243 | } | ||
244 | |||
245 | info = qxl_release_map(qdev, release); | ||
246 | next_id = info->next; | ||
247 | qxl_release_unmap(qdev, release, info); | ||
248 | |||
249 | qxl_release_unreserve(qdev, release); | ||
250 | QXL_INFO(qdev, "popped %lld, next %lld\n", id, | ||
251 | next_id); | ||
252 | |||
253 | switch (release->type) { | ||
254 | case QXL_RELEASE_DRAWABLE: | ||
255 | case QXL_RELEASE_SURFACE_CMD: | ||
256 | case QXL_RELEASE_CURSOR_CMD: | ||
257 | break; | ||
258 | default: | ||
259 | DRM_ERROR("unexpected release type\n"); | ||
260 | break; | ||
261 | } | ||
262 | id = next_id; | ||
263 | |||
264 | qxl_release_free(qdev, release); | ||
265 | ++i; | ||
266 | } | ||
267 | } | ||
268 | |||
269 | QXL_INFO(qdev, "%s: %lld\n", __func__, i); | ||
270 | |||
271 | return i; | ||
272 | } | ||
273 | |||
274 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, | ||
275 | struct qxl_bo **_bo) | ||
276 | { | ||
277 | struct qxl_bo *bo; | ||
278 | int ret; | ||
279 | |||
280 | ret = qxl_bo_create(qdev, size, false /* not kernel - device */, | ||
281 | QXL_GEM_DOMAIN_VRAM, NULL, &bo); | ||
282 | if (ret) { | ||
283 | DRM_ERROR("failed to allocate VRAM BO\n"); | ||
284 | return ret; | ||
285 | } | ||
286 | ret = qxl_bo_reserve(bo, false); | ||
287 | if (unlikely(ret != 0)) | ||
288 | goto out_unref; | ||
289 | |||
290 | *_bo = bo; | ||
291 | return 0; | ||
292 | out_unref: | ||
293 | qxl_bo_unref(&bo); | ||
294 | return 0; | ||
295 | } | ||
296 | |||
/*
 * Issue an async io-port command and wait for its completion interrupt.
 *
 * irq_received_io_cmd counts completion interrupts and last_sent_io_cmd
 * counts commands issued; if an earlier command has not completed yet we
 * wait for it first so only one async command is in flight at a time.
 * Serialized by async_io_mutex.  Returns 0 or -ERESTARTSYS if the wait
 * was interrupted by a signal.
 */
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
{
	int irq_num;
	long addr = qdev->io_base + port;
	int ret;

	mutex_lock(&qdev->async_io_mutex);
	irq_num = atomic_read(&qdev->irq_received_io_cmd);


	/* a previously issued command is still pending - wait it out */
	if (qdev->last_sent_io_cmd > irq_num) {
		ret = wait_event_interruptible(qdev->io_cmd_event,
					       atomic_read(&qdev->irq_received_io_cmd) > irq_num);
		if (ret)
			goto out;
		irq_num = atomic_read(&qdev->irq_received_io_cmd);
	}
	outb(val, addr);
	qdev->last_sent_io_cmd = irq_num + 1;
	ret = wait_event_interruptible(qdev->io_cmd_event,
				       atomic_read(&qdev->irq_received_io_cmd) > irq_num);
out:
	mutex_unlock(&qdev->async_io_mutex);
	return ret;
}
322 | |||
323 | static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port) | ||
324 | { | ||
325 | int ret; | ||
326 | |||
327 | restart: | ||
328 | ret = wait_for_io_cmd_user(qdev, val, port); | ||
329 | if (ret == -ERESTARTSYS) | ||
330 | goto restart; | ||
331 | } | ||
332 | |||
/*
 * Ask the device to update (flush rendering of) @area on @surf.
 *
 * The primary surface is always id 0; other surfaces use their allocated
 * surface_id.  Rectangles that fall outside the surface are rejected
 * with -EINVAL.  Serialized by update_area_mutex since the request is
 * passed via the shared ram_header.  Returns 0 or -ERESTARTSYS.
 */
int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
			const struct qxl_rect *area)
{
	int surface_id;
	uint32_t surface_width, surface_height;
	int ret;

	if (!surf->hw_surf_alloc)
		DRM_ERROR("got io update area with no hw surface\n");

	if (surf->is_primary)
		surface_id = 0;
	else
		surface_id = surf->surface_id;
	surface_width = surf->surf.width;
	surface_height = surf->surf.height;

	if (area->left < 0 || area->top < 0 ||
	    area->right > surface_width || area->bottom > surface_height) {
		qxl_io_log(qdev, "%s: not doing area update for "
			   "%d, (%d,%d,%d,%d) (%d,%d)\n", __func__, surface_id, area->left,
			   area->top, area->right, area->bottom, surface_width, surface_height);
		return -EINVAL;
	}
	mutex_lock(&qdev->update_area_mutex);
	qdev->ram_header->update_area = *area;
	qdev->ram_header->update_surface = surface_id;
	ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC);
	mutex_unlock(&qdev->update_area_mutex);
	return ret;
}
364 | |||
/* Ring the QXL_IO_NOTIFY_OOM doorbell (fire-and-forget, no wait). */
void qxl_io_notify_oom(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM);
}
369 | |||
/* Ring the QXL_IO_FLUSH_RELEASE doorbell (fire-and-forget, no wait). */
void qxl_io_flush_release(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE);
}
374 | |||
/* Issue the async flush-surfaces command and wait for its completion. */
void qxl_io_flush_surfaces(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC);
}
379 | |||
380 | |||
/* Tear down the primary surface (async command, waits for completion). */
void qxl_io_destroy_primary(struct qxl_device *qdev)
{
	wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
}
385 | |||
/*
 * Point the device's primary surface at @bo (at byte @offset) and issue
 * the create-primary command, waiting for its completion.  Pixel format
 * and stride come from the bo's surface description; the request is
 * passed through the shared ram_header.
 */
void qxl_io_create_primary(struct qxl_device *qdev, unsigned width,
			   unsigned height, unsigned offset, struct qxl_bo *bo)
{
	struct qxl_surface_create *create;

	QXL_INFO(qdev, "%s: qdev %p, ram_header %p\n", __func__, qdev,
		 qdev->ram_header);
	create = &qdev->ram_header->create_surface;
	create->format = bo->surf.format;
	create->width = width;
	create->height = height;
	create->stride = bo->surf.stride;
	create->mem = qxl_bo_physical_address(qdev, bo, offset);

	QXL_INFO(qdev, "%s: mem = %llx, from %p\n", __func__, create->mem,
		 bo->kptr);

	/* KEEP_DATA: the device must not clear the existing contents */
	create->flags = QXL_SURF_FLAG_KEEP_DATA;
	create->type = QXL_SURF_TYPE_PRIMARY;

	wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC);
}
408 | |||
/* Register memory slot @id with the device (async, waits for completion). */
void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id)
{
	QXL_INFO(qdev, "qxl_memslot_add %d\n", id);
	wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC);
}
414 | |||
/*
 * Format a message into the device's shared log buffer and ping the
 * QXL_IO_LOG port so the host emits it.  Output is truncated at
 * QXL_LOG_BUF_SIZE.
 *
 * NOTE(review): log_buf is written without any locking - concurrent
 * callers can interleave/overwrite each other; confirm acceptable.
 */
void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(qdev->ram_header->log_buf, QXL_LOG_BUF_SIZE, fmt, args);
	va_end(args);
	/*
	 * DO not do a DRM output here - this will call printk, which will
	 * call back into qxl for rendering (qxl_fb)
	 */
	outb(0, qdev->io_base + QXL_IO_LOG);
}
428 | |||
/* Hard-reset the device via the QXL_IO_RESET port (no wait). */
void qxl_io_reset(struct qxl_device *qdev)
{
	outb(0, qdev->io_base + QXL_IO_RESET);
}
433 | |||
434 | void qxl_io_monitors_config(struct qxl_device *qdev) | ||
435 | { | ||
436 | qxl_io_log(qdev, "%s: %d [%dx%d+%d+%d]\n", __func__, | ||
437 | qdev->monitors_config ? | ||
438 | qdev->monitors_config->count : -1, | ||
439 | qdev->monitors_config && qdev->monitors_config->count ? | ||
440 | qdev->monitors_config->heads[0].width : -1, | ||
441 | qdev->monitors_config && qdev->monitors_config->count ? | ||
442 | qdev->monitors_config->heads[0].height : -1, | ||
443 | qdev->monitors_config && qdev->monitors_config->count ? | ||
444 | qdev->monitors_config->heads[0].x : -1, | ||
445 | qdev->monitors_config && qdev->monitors_config->count ? | ||
446 | qdev->monitors_config->heads[0].y : -1 | ||
447 | ); | ||
448 | |||
449 | wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC); | ||
450 | } | ||
451 | |||
/*
 * Allocate a surface id (>= 1) for @surf from the surface idr.
 *
 * If the idr hands back an id beyond the device's surface limit
 * (rom->n_surfaces), the id is released and surfaces are reaped to make
 * room before retrying.
 *
 * NOTE(review): this always returns 0, even on the allocation-failure
 * path, so callers cannot detect failure - verify intentional.
 * NOTE(review): kfree(surf) on idr_pre_get() failure frees a qxl_bo by
 * raw kfree while the caller may still reference it - looks wrong,
 * confirm ownership.
 */
int qxl_surface_id_alloc(struct qxl_device *qdev,
		      struct qxl_bo *surf)
{
	uint32_t handle = -ENOMEM;
	int idr_ret;
	int count = 0;
again:
	/* pre-3.9 idr API: preallocate, then idr_get_new_above below */
	if (idr_pre_get(&qdev->surf_id_idr, GFP_ATOMIC) == 0) {
		DRM_ERROR("Out of memory for surf idr\n");
		kfree(surf);
		goto alloc_fail;
	}

	spin_lock(&qdev->surf_id_idr_lock);
	idr_ret = idr_get_new_above(&qdev->surf_id_idr, NULL, 1, &handle);
	spin_unlock(&qdev->surf_id_idr_lock);

	if (idr_ret == -EAGAIN)
		goto again;

	/* id exceeds what the device supports: free it, reap, retry */
	if (handle >= qdev->rom->n_surfaces) {
		count++;
		spin_lock(&qdev->surf_id_idr_lock);
		idr_remove(&qdev->surf_id_idr, handle);
		spin_unlock(&qdev->surf_id_idr_lock);
		qxl_reap_surface_id(qdev, 2);
		goto again;
	}
	surf->surface_id = handle;

	spin_lock(&qdev->surf_id_idr_lock);
	qdev->last_alloced_surf_id = handle;
	spin_unlock(&qdev->surf_id_idr_lock);
alloc_fail:
	return 0;
}
488 | |||
/* Return @surface_id to the idr so it can be allocated again. */
void qxl_surface_id_dealloc(struct qxl_device *qdev,
			    uint32_t surface_id)
{
	spin_lock(&qdev->surf_id_idr_lock);
	idr_remove(&qdev->surf_id_idr, surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
}
496 | |||
/*
 * Create the hardware surface backing @surf (QXL_SURFACE_CMD_CREATE).
 *
 * If @new_mem is given (the bo is being moved by TTM) the surface data
 * address is computed from the new placement; otherwise from the bo's
 * current physical address.  The create-release is stashed in
 * surf->surf_create and only freed when the surface is destroyed.
 * Idempotent: returns 0 immediately if the hw surface already exists.
 */
int qxl_hw_surface_alloc(struct qxl_device *qdev,
			 struct qxl_bo *surf,
			 struct ttm_mem_reg *new_mem)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;

	if (surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE,
						 NULL,
						 &release);
	if (ret)
		return ret;

	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_CREATE;
	cmd->u.surface_create.format = surf->surf.format;
	cmd->u.surface_create.width = surf->surf.width;
	cmd->u.surface_create.height = surf->surf.height;
	cmd->u.surface_create.stride = surf->surf.stride;
	if (new_mem) {
		int slot_id = surf->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot;
		struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]);

		/* TODO - need to hold one of the locks to read tbo.offset */
		cmd->u.surface_create.data = slot->high_bits;

		cmd->u.surface_create.data |= (new_mem->start << PAGE_SHIFT) + surf->tbo.bdev->man[new_mem->mem_type].gpu_offset;
	} else
		cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0);
	cmd->surface_id = surf->surface_id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	surf->surf_create = release;

	/* no need to add a release to the fence for this bo,
	   since it is only released when we ask to destroy the surface
	   and it would never signal otherwise */
	qxl_fence_releaseable(qdev, release);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_unreserve(qdev, release);

	surf->hw_surf_alloc = true;
	/* now that the surface exists on the device, publish it in the
	 * idr (replacing the NULL placeholder from id allocation) */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, surf, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	return 0;
}
550 | |||
/*
 * Destroy the hardware surface backing @surf (QXL_SURFACE_CMD_DESTROY).
 *
 * The destroy-release is chained to the surface's create-release (passed
 * to qxl_alloc_surface_release_reserved) so both are reclaimed together.
 * The surface is unpublished from the idr before the command is pushed;
 * the numeric id itself is freed later, keyed by surface_release_id.
 * No-op (returns 0) when no hw surface exists.
 */
int qxl_hw_surface_dealloc(struct qxl_device *qdev,
			   struct qxl_bo *surf)
{
	struct qxl_surface_cmd *cmd;
	struct qxl_release *release;
	int ret;
	int id;

	if (!surf->hw_surf_alloc)
		return 0;

	ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY,
						 surf->surf_create,
						 &release);
	if (ret)
		return ret;

	surf->surf_create = NULL;
	/* remove the surface from the idr, but not the surface id yet */
	spin_lock(&qdev->surf_id_idr_lock);
	idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id);
	spin_unlock(&qdev->surf_id_idr_lock);
	surf->hw_surf_alloc = false;

	id = surf->surface_id;
	surf->surface_id = 0;

	/* remember which id to dealloc once this release signals */
	release->surface_release_id = id;
	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_SURFACE_CMD_DESTROY;
	cmd->surface_id = id;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_fence_releaseable(qdev, release);

	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

	qxl_release_unreserve(qdev, release);


	return 0;
}
593 | |||
594 | int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf) | ||
595 | { | ||
596 | struct qxl_rect rect; | ||
597 | int ret; | ||
598 | |||
599 | /* if we are evicting, we need to make sure the surface is up | ||
600 | to date */ | ||
601 | rect.left = 0; | ||
602 | rect.right = surf->surf.width; | ||
603 | rect.top = 0; | ||
604 | rect.bottom = surf->surf.height; | ||
605 | retry: | ||
606 | ret = qxl_io_update_area(qdev, surf, &rect); | ||
607 | if (ret == -ERESTARTSYS) | ||
608 | goto retry; | ||
609 | return ret; | ||
610 | } | ||
611 | |||
/* Evict @surf's hw surface; caller holds surf_evict_mutex.  With
 * @do_update_area the surface contents are flushed to memory first. */
void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	/* no need to update area if we are just freeing the surface normally */
	if (do_update_area)
		qxl_update_surface(qdev, surf);

	/* nuke the surface id at the hw */
	qxl_hw_surface_dealloc(qdev, surf);
}
621 | |||
/* Locked wrapper around qxl_surface_evict_locked(). */
void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area)
{
	mutex_lock(&qdev->surf_evict_mutex);
	qxl_surface_evict_locked(qdev, surf, do_update_area);
	mutex_unlock(&qdev->surf_evict_mutex);
}
628 | |||
/*
 * Try to evict one surface so its id can be reused.
 *
 * With @stall false this is best-effort: returns -EBUSY if the bo cannot
 * be reserved immediately or still has active releases.  With @stall
 * true, surf_evict_mutex (held by the caller, qxl_reap_surface_id) is
 * dropped around the blocking ttm wait so it is not held while sleeping.
 */
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
	int ret;

	ret = qxl_bo_reserve(surf, false);
	if (ret == -EBUSY)
		return -EBUSY;

	if (surf->fence.num_active_releases > 0 && stall == false) {
		qxl_bo_unreserve(surf);
		return -EBUSY;
	}

	if (stall)
		mutex_unlock(&qdev->surf_evict_mutex);

	spin_lock(&surf->tbo.bdev->fence_lock);
	ret = ttm_bo_wait(&surf->tbo, true, true, !stall);
	spin_unlock(&surf->tbo.bdev->fence_lock);

	if (stall)
		mutex_lock(&qdev->surf_evict_mutex);
	if (ret == -EBUSY) {
		qxl_bo_unreserve(surf);
		return -EBUSY;
	}

	qxl_surface_evict_locked(qdev, surf, true);
	qxl_bo_unreserve(surf);
	return 0;
}
660 | |||
/*
 * Reclaim up to @max_to_reap surface ids so allocation can proceed.
 *
 * Scans the surface idr round-robin starting just past the most
 * recently allocated id.  The first pass is non-blocking; if nothing
 * was reaped, a second pass is made in stalling mode.  After reaping,
 * briefly sleep and force a garbage-collect pass so the freed ids
 * actually become reusable.  Always returns 0.
 */
static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap)
{
	int num_reaped = 0;
	int i, ret;
	bool stall = false;
	int start = 0;

	mutex_lock(&qdev->surf_evict_mutex);
again:

	spin_lock(&qdev->surf_id_idr_lock);
	start = qdev->last_alloced_surf_id + 1;
	spin_unlock(&qdev->surf_id_idr_lock);

	for (i = start; i < start + qdev->rom->n_surfaces; i++) {
		void *objptr;
		int surfid = i % qdev->rom->n_surfaces;

		/* this avoids the case where the objects is in the
		   idr but has been evicted half way - its makes
		   the idr lookup atomic with the eviction */
		spin_lock(&qdev->surf_id_idr_lock);
		objptr = idr_find(&qdev->surf_id_idr, surfid);
		spin_unlock(&qdev->surf_id_idr_lock);

		if (!objptr)
			continue;

		ret = qxl_reap_surf(qdev, objptr, stall);
		if (ret == 0)
			num_reaped++;
		if (num_reaped >= max_to_reap)
			break;
	}
	/* nothing reclaimed without blocking - retry once, stalling */
	if (num_reaped == 0 && stall == false) {
		stall = true;
		goto again;
	}

	mutex_unlock(&qdev->surf_evict_mutex);
	if (num_reaped) {
		usleep_range(500, 1000);
		qxl_queue_garbage_collect(qdev, true);
	}

	return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c new file mode 100644 index 000000000000..c630152f2d2f --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c | |||
@@ -0,0 +1,135 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2009 Red Hat <bskeggs@redhat.com> | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining | ||
5 | * a copy of this software and associated documentation files (the | ||
6 | * "Software"), to deal in the Software without restriction, including | ||
7 | * without limitation the rights to use, copy, modify, merge, publish, | ||
8 | * distribute, sublicense, and/or sell copies of the Software, and to | ||
9 | * permit persons to whom the Software is furnished to do so, subject to | ||
10 | * the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the | ||
13 | * next paragraph) shall be included in all copies or substantial | ||
14 | * portions of the Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
17 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
18 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. | ||
19 | * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE | ||
20 | * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||
21 | * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||
22 | * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
23 | * | ||
24 | */ | ||
25 | |||
26 | /* | ||
27 | * Authors: | ||
28 | * Alon Levy <alevy@redhat.com> | ||
29 | */ | ||
30 | |||
31 | #include <linux/debugfs.h> | ||
32 | |||
33 | #include "drmP.h" | ||
34 | #include "qxl_drv.h" | ||
35 | #include "qxl_object.h" | ||
36 | |||
37 | |||
/* debugfs: dump irq counters, one per line - total, display, cursor,
 * io-cmd completions, then the error count. */
static int
qxl_debugfs_irq_received(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct qxl_device *qdev = node->minor->dev->dev_private;

	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
	seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
	seq_printf(m, "%d\n", qdev->irq_received_error);
	return 0;
}
51 | |||
/*
 * debugfs: one line per gem object - size, pin count, sync object and
 * number of active releases.
 * NOTE(review): gem.objects is walked without any lock held - confirm
 * the list cannot change concurrently with this dump.
 */
static int
qxl_debugfs_buffers_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct qxl_device *qdev = node->minor->dev->dev_private;
	struct qxl_bo *bo;

	list_for_each_entry(bo, &qdev->gem.objects, list) {
		seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
			   (unsigned long)bo->gem_base.size, bo->pin_count,
			   bo->tbo.sync_obj, bo->fence.num_active_releases);
	}
	return 0;
}
66 | |||
/* Static debugfs entries registered per-minor by qxl_debugfs_init(). */
static struct drm_info_list qxl_debugfs_list[] = {
	{ "irq_received", qxl_debugfs_irq_received, 0, NULL },
	{ "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
};
#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
72 | |||
/* Register the static qxl debugfs entries on @minor's debugfs root. */
int
qxl_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
				 minor->debugfs_root, minor);
	return 0;
}
80 | |||
/* Unregister the static qxl debugfs entries from @minor. */
void
qxl_debugfs_takedown(struct drm_minor *minor)
{
	drm_debugfs_remove_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
				 minor);
}
87 | |||
/*
 * Register a component's debugfs files with both the control and the
 * primary DRM minors, remembering them in qdev->debugfs so
 * qxl_debugfs_remove_files() can unregister the lot.  Re-adding an
 * already-registered array is a no-op.  Returns -EINVAL when the
 * fixed-size component table (QXL_DEBUGFS_MAX_COMPONENTS) is full.
 */
int qxl_debugfs_add_files(struct qxl_device *qdev,
			  struct drm_info_list *files,
			  unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < qdev->debugfs_count; i++) {
		if (qdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = qdev->debugfs_count + 1;
	if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	qdev->debugfs[qdev->debugfs_count].files = files;
	qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
	qdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 qdev->ddev->control->debugfs_root,
				 qdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 qdev->ddev->primary->debugfs_root,
				 qdev->ddev->primary);
#endif
	return 0;
}
120 | |||
/* Unregister every file previously added via qxl_debugfs_add_files()
 * from both minors.  Compiled out without CONFIG_DEBUG_FS. */
void qxl_debugfs_remove_files(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < qdev->debugfs_count; i++) {
		drm_debugfs_remove_files(qdev->debugfs[i].files,
					 qdev->debugfs[i].num_files,
					 qdev->ddev->control);
		drm_debugfs_remove_files(qdev->debugfs[i].files,
					 qdev->debugfs[i].num_files,
					 qdev->ddev->primary);
	}
#endif
}
diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h new file mode 100644 index 000000000000..94c5aec71920 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_dev.h | |||
@@ -0,0 +1,879 @@ | |||
1 | /* | ||
2 | Copyright (C) 2009 Red Hat, Inc. | ||
3 | |||
4 | Redistribution and use in source and binary forms, with or without | ||
5 | modification, are permitted provided that the following conditions are | ||
6 | met: | ||
7 | |||
8 | * Redistributions of source code must retain the above copyright | ||
9 | notice, this list of conditions and the following disclaimer. | ||
10 | * Redistributions in binary form must reproduce the above copyright | ||
11 | notice, this list of conditions and the following disclaimer in | ||
12 | the documentation and/or other materials provided with the | ||
13 | distribution. | ||
14 | * Neither the name of the copyright holder nor the names of its | ||
15 | contributors may be used to endorse or promote products derived | ||
16 | from this software without specific prior written permission. | ||
17 | |||
18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS | ||
19 | IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED | ||
20 | TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A | ||
21 | PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
22 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
23 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
24 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
25 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
26 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
29 | */ | ||
30 | |||
31 | |||
32 | #ifndef H_QXL_DEV | ||
33 | #define H_QXL_DEV | ||
34 | |||
35 | #include <linux/types.h> | ||
36 | |||
37 | /* | ||
38 | * from spice-protocol | ||
39 | * Release 0.10.0 | ||
40 | */ | ||
41 | |||
42 | /* enums.h */ | ||
43 | |||
/* Wire encodings for image data.  Explicit values are protocol
 * constants from spice-protocol; never renumber. */
enum SpiceImageType {
	SPICE_IMAGE_TYPE_BITMAP,
	SPICE_IMAGE_TYPE_QUIC,
	SPICE_IMAGE_TYPE_RESERVED,
	SPICE_IMAGE_TYPE_LZ_PLT = 100,
	SPICE_IMAGE_TYPE_LZ_RGB,
	SPICE_IMAGE_TYPE_GLZ_RGB,
	SPICE_IMAGE_TYPE_FROM_CACHE,
	SPICE_IMAGE_TYPE_SURFACE,
	SPICE_IMAGE_TYPE_JPEG,
	SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS,
	SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB,
	SPICE_IMAGE_TYPE_JPEG_ALPHA,

	SPICE_IMAGE_TYPE_ENUM_END
};

/* Pixel layouts for raw bitmap image data. */
enum SpiceBitmapFmt {
	SPICE_BITMAP_FMT_INVALID,
	SPICE_BITMAP_FMT_1BIT_LE,
	SPICE_BITMAP_FMT_1BIT_BE,
	SPICE_BITMAP_FMT_4BIT_LE,
	SPICE_BITMAP_FMT_4BIT_BE,
	SPICE_BITMAP_FMT_8BIT,
	SPICE_BITMAP_FMT_16BIT,
	SPICE_BITMAP_FMT_24BIT,
	SPICE_BITMAP_FMT_32BIT,
	SPICE_BITMAP_FMT_RGBA,

	SPICE_BITMAP_FMT_ENUM_END
};

/* Surface pixel formats.  The explicit values are fixed by the wire
 * protocol and must not be renumbered. */
enum SpiceSurfaceFmt {
	SPICE_SURFACE_FMT_INVALID,
	SPICE_SURFACE_FMT_1_A,
	SPICE_SURFACE_FMT_8_A = 8,
	SPICE_SURFACE_FMT_16_555 = 16,
	SPICE_SURFACE_FMT_32_xRGB = 32,
	SPICE_SURFACE_FMT_16_565 = 80,
	SPICE_SURFACE_FMT_32_ARGB = 96,

	SPICE_SURFACE_FMT_ENUM_END
};

/* Clipping variants: no clipping, or a list of rectangles. */
enum SpiceClipType {
	SPICE_CLIP_TYPE_NONE,
	SPICE_CLIP_TYPE_RECTS,

	SPICE_CLIP_TYPE_ENUM_END
};

/* Raster-operation descriptor bit flags (used in rop_descriptor
 * fields of the draw commands). */
enum SpiceRopd {
	SPICE_ROPD_INVERS_SRC = (1 << 0),
	SPICE_ROPD_INVERS_BRUSH = (1 << 1),
	SPICE_ROPD_INVERS_DEST = (1 << 2),
	SPICE_ROPD_OP_PUT = (1 << 3),
	SPICE_ROPD_OP_OR = (1 << 4),
	SPICE_ROPD_OP_AND = (1 << 5),
	SPICE_ROPD_OP_XOR = (1 << 6),
	SPICE_ROPD_OP_BLACKNESS = (1 << 7),
	SPICE_ROPD_OP_WHITENESS = (1 << 8),
	SPICE_ROPD_OP_INVERS = (1 << 9),
	SPICE_ROPD_INVERS_RES = (1 << 10),

	SPICE_ROPD_MASK = 0x7ff		/* all defined bits */
};

/* Brush variants: none, solid color, or tiled pattern. */
enum SpiceBrushType {
	SPICE_BRUSH_TYPE_NONE,
	SPICE_BRUSH_TYPE_SOLID,
	SPICE_BRUSH_TYPE_PATTERN,

	SPICE_BRUSH_TYPE_ENUM_END
};

/* Cursor bitmap formats. */
enum SpiceCursorType {
	SPICE_CURSOR_TYPE_ALPHA,
	SPICE_CURSOR_TYPE_MONO,
	SPICE_CURSOR_TYPE_COLOR4,
	SPICE_CURSOR_TYPE_COLOR8,
	SPICE_CURSOR_TYPE_COLOR16,
	SPICE_CURSOR_TYPE_COLOR24,
	SPICE_CURSOR_TYPE_COLOR32,

	SPICE_CURSOR_TYPE_ENUM_END
};
130 | |||
131 | /* qxl_dev.h */ | ||
132 | |||
133 | #pragma pack(push, 1) | ||
134 | |||
135 | #define REDHAT_PCI_VENDOR_ID 0x1b36 | ||
136 | |||
137 | /* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */ | ||
138 | #define QXL_DEVICE_ID_STABLE 0x0100 | ||
139 | |||
140 | enum { | ||
141 | QXL_REVISION_STABLE_V04 = 0x01, | ||
142 | QXL_REVISION_STABLE_V06 = 0x02, | ||
143 | QXL_REVISION_STABLE_V10 = 0x03, | ||
144 | QXL_REVISION_STABLE_V12 = 0x04, | ||
145 | }; | ||
146 | |||
147 | #define QXL_DEVICE_ID_DEVEL 0x01ff | ||
148 | #define QXL_REVISION_DEVEL 0x01 | ||
149 | |||
150 | #define QXL_ROM_MAGIC (*(uint32_t *)"QXRO") | ||
151 | #define QXL_RAM_MAGIC (*(uint32_t *)"QXRA") | ||
152 | |||
/* Indices of the PCI memory/I/O ranges (BARs) exposed by the device. */
enum {
	QXL_RAM_RANGE_INDEX,
	QXL_VRAM_RANGE_INDEX,
	QXL_ROM_RANGE_INDEX,
	QXL_IO_RANGE_INDEX,

	QXL_PCI_RANGES			/* number of ranges */
};

/* Port offsets within the I/O range.  Append-only so that older
 * guest drivers keep working against newer devices.
 * qxl-1 compat: append only */
enum {
	QXL_IO_NOTIFY_CMD,
	QXL_IO_NOTIFY_CURSOR,
	QXL_IO_UPDATE_AREA,
	QXL_IO_UPDATE_IRQ,
	QXL_IO_NOTIFY_OOM,
	QXL_IO_RESET,
	QXL_IO_SET_MODE,                  /* qxl-1 */
	QXL_IO_LOG,
	/* appended for qxl-2 */
	QXL_IO_MEMSLOT_ADD,
	QXL_IO_MEMSLOT_DEL,
	QXL_IO_DETACH_PRIMARY,
	QXL_IO_ATTACH_PRIMARY,
	QXL_IO_CREATE_PRIMARY,
	QXL_IO_DESTROY_PRIMARY,
	QXL_IO_DESTROY_SURFACE_WAIT,
	QXL_IO_DESTROY_ALL_SURFACES,
	/* appended for qxl-3 */
	QXL_IO_UPDATE_AREA_ASYNC,
	QXL_IO_MEMSLOT_ADD_ASYNC,
	QXL_IO_CREATE_PRIMARY_ASYNC,
	QXL_IO_DESTROY_PRIMARY_ASYNC,
	QXL_IO_DESTROY_SURFACE_ASYNC,
	QXL_IO_DESTROY_ALL_SURFACES_ASYNC,
	QXL_IO_FLUSH_SURFACES_ASYNC,
	QXL_IO_FLUSH_RELEASE,
	/* appended for qxl-4 */
	QXL_IO_MONITORS_CONFIG_ASYNC,

	QXL_IO_RANGE_SIZE		/* number of ports */
};
195 | |||
/* 64-bit address as handed to the device. */
typedef uint64_t QXLPHYSICAL;
typedef int32_t QXLFIXED; /* fixed 28.4 */

/* Point with 28.4 fixed-point coordinates. */
struct qxl_point_fix {
	QXLFIXED x;
	QXLFIXED y;
};

/* Point with 32-bit integer coordinates. */
struct qxl_point {
	int32_t x;
	int32_t y;
};

/* Point with 16-bit coordinates (the "1_6" naming comes from
 * spice-protocol). */
struct qxl_point_1_6 {
	int16_t x;
	int16_t y;
};

/* Rectangle.  NOTE the field order: top, left, bottom, right. */
struct qxl_rect {
	int32_t top;
	int32_t left;
	int32_t bottom;
	int32_t right;
};

/* Same as qxl_rect but with unsigned coordinates. */
struct qxl_urect {
	uint32_t top;
	uint32_t left;
	uint32_t bottom;
	uint32_t right;
};
227 | |||
/* qxl-1 compat: append only */
/* Contents of the ROM BAR: device-provided configuration, read by the
 * guest.  Fields are grouped by the device revision that introduced
 * them; never reorder or insert in the middle. */
struct qxl_rom {
	uint32_t magic;			/* QXL_ROM_MAGIC */
	uint32_t id;
	uint32_t update_id;
	uint32_t compression_level;
	uint32_t log_level;
	uint32_t mode;			  /* qxl-1 */
	uint32_t modes_offset;		/* offset of struct qxl_modes in the ROM */
	uint32_t num_io_pages;
	uint32_t pages_offset;		  /* qxl-1 */
	uint32_t draw_area_offset;	  /* qxl-1 */
	uint32_t surface0_area_size;	  /* qxl-1 name: draw_area_size */
	uint32_t ram_header_offset;
	uint32_t mm_clock;
	/* appended for qxl-2 */
	uint32_t n_surfaces;
	uint64_t flags;
	uint8_t slots_start;
	uint8_t slots_end;
	uint8_t slot_gen_bits;
	uint8_t slot_id_bits;
	uint8_t slot_generation;
	/* appended for qxl-4 */
	uint8_t client_present;
	uint8_t client_capabilities[58];
	uint32_t client_monitors_config_crc;
	struct {
		uint16_t count;
		uint16_t padding;
		struct qxl_urect heads[64];
	} client_monitors_config;
};

/* qxl-1 compat: fixed */
/* One fixed display mode advertised via the ROM mode list. */
struct qxl_mode {
	uint32_t id;
	uint32_t x_res;
	uint32_t y_res;
	uint32_t bits;			/* bits per pixel */
	uint32_t stride;		/* bytes per scanline */
	uint32_t x_mili;		/* physical size in millimeters */
	uint32_t y_mili;
	uint32_t orientation;
};

/* qxl-1 compat: fixed */
/* Variable-length mode list located at qxl_rom.modes_offset. */
struct qxl_modes {
	uint32_t n_modes;
	struct qxl_mode modes[0];	/* n_modes entries follow */
};
279 | |||
/* qxl-1 compat: append only */
/* Discriminator for qxl_command.type. */
enum qxl_cmd_type {
	QXL_CMD_NOP,
	QXL_CMD_DRAW,
	QXL_CMD_UPDATE,
	QXL_CMD_CURSOR,
	QXL_CMD_MESSAGE,
	QXL_CMD_SURFACE,
};

/* qxl-1 compat: fixed */
/* One entry in the command / cursor rings. */
struct qxl_command {
	QXLPHYSICAL data;		/* address of the command body */
	uint32_t type;			/* enum qxl_cmd_type */
	uint32_t padding;
};

#define QXL_COMMAND_FLAG_COMPAT		(1<<0)
#define QXL_COMMAND_FLAG_COMPAT_16BPP	(2<<0)	/* == (1<<1); kept as spelled in spice-protocol */

struct qxl_command_ext {
	struct qxl_command cmd;
	uint32_t group_id;
	uint32_t flags;			/* QXL_COMMAND_FLAG_* */
};

/* Guest physical range registered as a device memory slot
 * (QXL_IO_MEMSLOT_ADD). */
struct qxl_mem_slot {
	uint64_t mem_start;
	uint64_t mem_end;
};

#define QXL_SURF_TYPE_PRIMARY	   0

#define QXL_SURF_FLAG_KEEP_DATA	   (1 << 0)

/* Parameters for QXL_IO_CREATE_PRIMARY, written into the RAM header. */
struct qxl_surface_create {
	uint32_t width;
	uint32_t height;
	int32_t stride;			/* may be negative (bottom-up) — TODO confirm */
	uint32_t format;		/* SpiceSurfaceFmt */
	uint32_t position;
	uint32_t mouse_mode;
	uint32_t flags;			/* QXL_SURF_FLAG_* */
	uint32_t type;			/* QXL_SURF_TYPE_* */
	QXLPHYSICAL mem;
};

/* Fixed ring sizes; part of the RAM-header ABI. */
#define QXL_COMMAND_RING_SIZE 32
#define QXL_CURSOR_RING_SIZE 32
#define QXL_RELEASE_RING_SIZE 8

#define QXL_LOG_BUF_SIZE 4096
/* Bits of int_pending / int_mask in the RAM header. */
#define QXL_INTERRUPT_DISPLAY (1 << 0)
#define QXL_INTERRUPT_CURSOR (1 << 1)
#define QXL_INTERRUPT_IO_CMD (1 << 2)
#define QXL_INTERRUPT_ERROR  (1 << 3)
#define QXL_INTERRUPT_CLIENT (1 << 4)
#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG (1 << 5)

/* Producer/consumer bookkeeping for a ring shared with the device. */
struct qxl_ring_header {
	uint32_t num_items;
	uint32_t prod;
	uint32_t notify_on_prod;
	uint32_t cons;
	uint32_t notify_on_cons;
};

/* qxl-1 compat: append only */
/* Shared state at qxl_rom.ram_header_offset inside the RAM BAR:
 * interrupt state, log buffer, the three rings, and the parameter
 * areas used by the I/O ports. */
struct qxl_ram_header {
	uint32_t magic;			/* QXL_RAM_MAGIC */
	uint32_t int_pending;
	uint32_t int_mask;
	uint8_t log_buf[QXL_LOG_BUF_SIZE];
	struct qxl_ring_header  cmd_ring_hdr;
	struct qxl_command	cmd_ring[QXL_COMMAND_RING_SIZE];
	struct qxl_ring_header  cursor_ring_hdr;
	struct qxl_command	cursor_ring[QXL_CURSOR_RING_SIZE];
	struct qxl_ring_header  release_ring_hdr;
	uint64_t		release_ring[QXL_RELEASE_RING_SIZE];
	struct qxl_rect update_area;
	/* appended for qxl-2 */
	uint32_t update_surface;	/* operand for QXL_IO_UPDATE_AREA */
	struct qxl_mem_slot mem_slot;	/* operand for QXL_IO_MEMSLOT_ADD */
	struct qxl_surface_create create_surface;
	uint64_t flags;

	/* appended for qxl-4 */

	/* used by QXL_IO_MONITORS_CONFIG_ASYNC */
	QXLPHYSICAL monitors_config;
	uint8_t guest_capabilities[64];
};
373 | |||
/* Header shared by all releasable commands: the guest writes its id,
 * the device chains released resources through next. */
union qxl_release_info {
	uint64_t id;	  /* in  */
	uint64_t next;	  /* out */
};

struct qxl_release_info_ext {
	union qxl_release_info *info;
	uint32_t group_id;
};

/* Chunk of a linked list carrying variable-size data; prev/next are
 * device addresses of neighbouring chunks (0-terminated — TODO confirm). */
struct qxl_data_chunk {
	uint32_t data_size;		/* bytes used in data[] */
	QXLPHYSICAL prev_chunk;
	QXLPHYSICAL next_chunk;
	uint8_t data[0];
};

/* Body of a QXL_CMD_MESSAGE command. */
struct qxl_message {
	union qxl_release_info release_info;
	uint8_t data[0];
};

/* qxl-1 form of the update command (no surface id). */
struct qxl_compat_update_cmd {
	union qxl_release_info release_info;
	struct qxl_rect area;
	uint32_t update_id;
};

/* Body of a QXL_CMD_UPDATE command. */
struct qxl_update_cmd {
	union qxl_release_info release_info;
	struct qxl_rect area;
	uint32_t update_id;
	uint32_t surface_id;
};
408 | |||
/* Metadata of a cursor shape. */
struct qxl_cursor_header {
	uint64_t unique;
	uint16_t type;			/* SpiceCursorType */
	uint16_t width;
	uint16_t height;
	uint16_t hot_spot_x;
	uint16_t hot_spot_y;
};

/* Cursor shape: header followed by the first chunk of pixel data. */
struct qxl_cursor {
	struct qxl_cursor_header header;
	uint32_t data_size;
	struct qxl_data_chunk chunk;
};

/* Operations for qxl_cursor_cmd.type. */
enum {
	QXL_CURSOR_SET,
	QXL_CURSOR_MOVE,
	QXL_CURSOR_HIDE,
	QXL_CURSOR_TRAIL,
};

#define QXL_CURSOR_DEVICE_DATA_SIZE 128

/* Body of a QXL_CMD_CURSOR command; the union member used depends on
 * type. */
struct qxl_cursor_cmd {
	union qxl_release_info release_info;
	uint8_t type;			/* QXL_CURSOR_* */
	union {
		struct {
			struct qxl_point_1_6 position;
			uint8_t visible;
			QXLPHYSICAL shape;	/* address of struct qxl_cursor */
		} set;				/* QXL_CURSOR_SET */
		struct {
			uint16_t length;
			uint16_t frequency;
		} trail;			/* QXL_CURSOR_TRAIL */
		struct qxl_point_1_6 position;	/* QXL_CURSOR_MOVE */
	} u;
	/* todo: dynamic size from rom */
	uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE];
};

/* Operations for qxl_drawable.type; selects the member of its union. */
enum {
	QXL_DRAW_NOP,
	QXL_DRAW_FILL,
	QXL_DRAW_OPAQUE,
	QXL_DRAW_COPY,
	QXL_COPY_BITS,
	QXL_DRAW_BLEND,
	QXL_DRAW_BLACKNESS,
	QXL_DRAW_WHITENESS,
	QXL_DRAW_INVERS,
	QXL_DRAW_ROP3,
	QXL_DRAW_STROKE,
	QXL_DRAW_TEXT,
	QXL_DRAW_TRANSPARENT,
	QXL_DRAW_ALPHA_BLEND,
	QXL_DRAW_COMPOSITE
};
469 | |||
/* One glyph of a QXL_DRAW_TEXT string. */
struct qxl_raster_glyph {
	struct qxl_point render_pos;
	struct qxl_point glyph_origin;
	uint16_t width;
	uint16_t height;
	uint8_t data[0];
};

/* Glyph string referenced by struct qxl_text. */
struct qxl_string {
	uint32_t data_size;
	uint16_t length;
	uint16_t flags;
	struct qxl_data_chunk chunk;
};

/* Payload of QXL_COPY_BITS: only the source position. */
struct qxl_copy_bits {
	struct qxl_point src_pos;
};

/* How the drawable affects the area it covers. */
enum qxl_effect_type {
	QXL_EFFECT_BLEND = 0,
	QXL_EFFECT_OPAQUE = 1,
	QXL_EFFECT_REVERT_ON_DUP = 2,
	QXL_EFFECT_BLACKNESS_ON_DUP = 3,
	QXL_EFFECT_WHITENESS_ON_DUP = 4,
	QXL_EFFECT_NOP_ON_DUP = 5,
	QXL_EFFECT_NOP = 6,
	QXL_EFFECT_OPAQUE_BRUSH = 7
};

/* Tiled-pattern brush source. */
struct qxl_pattern {
	QXLPHYSICAL pat;		/* address of the pattern image */
	struct qxl_point pos;
};

/* Brush: solid color or pattern, per type (SpiceBrushType). */
struct qxl_brush {
	uint32_t type;
	union {
		uint32_t color;
		struct qxl_pattern pattern;
	} u;
};

/* Optional 1-bit mask applied to a draw operation. */
struct qxl_q_mask {
	uint8_t flags;
	struct qxl_point pos;
	QXLPHYSICAL bitmap;
};

/* Payload of QXL_DRAW_FILL. */
struct qxl_fill {
	struct qxl_brush brush;
	uint16_t rop_descriptor;	/* SpiceRopd bits */
	struct qxl_q_mask mask;
};

/* Payload of QXL_DRAW_OPAQUE. */
struct qxl_opaque {
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
	struct qxl_brush brush;
	uint16_t rop_descriptor;
	uint8_t scale_mode;
	struct qxl_q_mask mask;
};

/* Payload of QXL_DRAW_COPY and QXL_DRAW_BLEND. */
struct qxl_copy {
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
	uint16_t rop_descriptor;
	uint8_t scale_mode;
	struct qxl_q_mask mask;
};

/* Payload of QXL_DRAW_TRANSPARENT (color-keyed copy). */
struct qxl_transparent {
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
	uint32_t src_color;		/* key color to drop */
	uint32_t true_color;
};

/* Payload of QXL_DRAW_ALPHA_BLEND. */
struct qxl_alpha_blend {
	uint16_t alpha_flags;
	uint8_t alpha;
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
};

/* qxl-1 form of the alpha-blend payload (no alpha_flags). */
struct qxl_compat_alpha_blend {
	uint8_t alpha;
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
};

/* Payload of QXL_DRAW_ROP3 (ternary raster operation). */
struct qxl_rop_3 {
	QXLPHYSICAL src_bitmap;
	struct qxl_rect src_area;
	struct qxl_brush brush;
	uint8_t rop3;
	uint8_t scale_mode;
	struct qxl_q_mask mask;
};
570 | |||
/* Pen attributes for QXL_DRAW_STROKE. */
struct qxl_line_attr {
	uint8_t flags;
	uint8_t join_style;
	uint8_t end_style;
	uint8_t style_nseg;
	QXLFIXED width;
	QXLFIXED miter_limit;
	QXLPHYSICAL style;
};

/* Payload of QXL_DRAW_STROKE. */
struct qxl_stroke {
	QXLPHYSICAL path;		/* address of struct qxl_path */
	struct qxl_line_attr attr;
	struct qxl_brush brush;
	uint16_t fore_mode;
	uint16_t back_mode;
};

/* Payload of QXL_DRAW_TEXT. */
struct qxl_text {
	QXLPHYSICAL str;		/* address of struct qxl_string */
	struct qxl_rect back_area;
	struct qxl_brush fore_brush;
	struct qxl_brush back_brush;
	uint16_t fore_mode;
	uint16_t back_mode;
};

/* Payload of QXL_DRAW_BLACKNESS/WHITENESS/INVERS: mask only. */
struct qxl_mask {
	struct qxl_q_mask mask;
};

/* Clip description for a drawable; data is only meaningful for
 * SPICE_CLIP_TYPE_RECTS. */
struct qxl_clip {
	uint32_t type;			/* SpiceClipType */
	QXLPHYSICAL data;
};

/* Porter-Duff & blend operators for QXL_DRAW_COMPOSITE. */
enum qxl_operator {
	QXL_OP_CLEAR			 = 0x00,
	QXL_OP_SOURCE			 = 0x01,
	QXL_OP_DST			 = 0x02,
	QXL_OP_OVER			 = 0x03,
	QXL_OP_OVER_REVERSE		 = 0x04,
	QXL_OP_IN			 = 0x05,
	QXL_OP_IN_REVERSE		 = 0x06,
	QXL_OP_OUT			 = 0x07,
	QXL_OP_OUT_REVERSE		 = 0x08,
	QXL_OP_ATOP			 = 0x09,
	QXL_OP_ATOP_REVERSE		 = 0x0a,
	QXL_OP_XOR			 = 0x0b,
	QXL_OP_ADD			 = 0x0c,
	QXL_OP_SATURATE			 = 0x0d,
	/* Note the jump here from 0x0d to 0x30 */
	QXL_OP_MULTIPLY			 = 0x30,
	QXL_OP_SCREEN			 = 0x31,
	QXL_OP_OVERLAY			 = 0x32,
	QXL_OP_DARKEN			 = 0x33,
	QXL_OP_LIGHTEN			 = 0x34,
	QXL_OP_COLOR_DODGE		 = 0x35,
	QXL_OP_COLOR_BURN		 = 0x36,
	QXL_OP_HARD_LIGHT		 = 0x37,
	QXL_OP_SOFT_LIGHT		 = 0x38,
	QXL_OP_DIFFERENCE		 = 0x39,
	QXL_OP_EXCLUSION		 = 0x3a,
	QXL_OP_HSL_HUE			 = 0x3b,
	QXL_OP_HSL_SATURATION		 = 0x3c,
	QXL_OP_HSL_COLOR		 = 0x3d,
	QXL_OP_HSL_LUMINOSITY		 = 0x3e
};

/* Six fixed-point entries t00..t12 — presumably a 2x3 affine
 * transform; confirm against spice-protocol. */
struct qxl_transform {
	uint32_t t00;
	uint32_t t01;
	uint32_t t02;
	uint32_t t10;
	uint32_t t11;
	uint32_t t12;
};
648 | |||
/* The flags field has the following bit fields:
 *
 *     operator:		[ 0 - 7 ]
 *     src_filter:		[ 8 - 10 ]
 *     mask_filter:		[ 11 - 13 ]
 *     src_repeat:		[ 14 - 15 ]
 *     mask_repeat:		[ 16 - 17 ]
 *     component_alpha:		[ 18 - 18 ]
 *     reserved:		[ 19 - 31 ]
 *
 * The repeat and filter values are those of pixman:
 *		REPEAT_NONE =		0
 *              REPEAT_NORMAL =		1
 *		REPEAT_PAD =		2
 *		REPEAT_REFLECT =	3
 *
 * The filter values are:
 *		FILTER_NEAREST =	0
 *		FILTER_BILINEAR	=	1
 */
/* Payload of QXL_DRAW_COMPOSITE. */
struct qxl_composite {
	uint32_t flags;

	QXLPHYSICAL src;
	QXLPHYSICAL src_transform;	/* May be NULL */
	QXLPHYSICAL mask;		/* May be NULL */
	QXLPHYSICAL mask_transform;	/* May be NULL */
	struct qxl_point_1_6 src_origin;
	struct qxl_point_1_6 mask_origin;
};

/* qxl-1 form of the drawable: no surface ids, bitmap_offset/area
 * instead of self_bitmap, and no composite member. */
struct qxl_compat_drawable {
	union qxl_release_info release_info;
	uint8_t effect;			/* enum qxl_effect_type */
	uint8_t type;			/* QXL_DRAW_* */
	uint16_t bitmap_offset;
	struct qxl_rect bitmap_area;
	struct qxl_rect bbox;
	struct qxl_clip clip;
	uint32_t mm_time;
	union {
		struct qxl_fill fill;
		struct qxl_opaque opaque;
		struct qxl_copy copy;
		struct qxl_transparent transparent;
		struct qxl_compat_alpha_blend alpha_blend;
		struct qxl_copy_bits copy_bits;
		struct qxl_copy blend;
		struct qxl_rop_3 rop3;
		struct qxl_stroke stroke;
		struct qxl_text text;
		struct qxl_mask blackness;
		struct qxl_mask invers;
		struct qxl_mask whiteness;
	} u;
};

/* Body of a QXL_CMD_DRAW command; the union member used is selected
 * by type (QXL_DRAW_* / QXL_COPY_BITS). */
struct qxl_drawable {
	union qxl_release_info release_info;
	uint32_t surface_id;		/* destination surface */
	uint8_t effect;			/* enum qxl_effect_type */
	uint8_t type;			/* QXL_DRAW_* */
	uint8_t self_bitmap;
	struct qxl_rect self_bitmap_area;
	struct qxl_rect bbox;		/* bounding box in surface coords */
	struct qxl_clip clip;
	uint32_t mm_time;
	int32_t surfaces_dest[3];	/* additional surface dependencies */
	struct qxl_rect surfaces_rects[3];
	union {
		struct qxl_fill fill;
		struct qxl_opaque opaque;
		struct qxl_copy copy;
		struct qxl_transparent transparent;
		struct qxl_alpha_blend alpha_blend;
		struct qxl_copy_bits copy_bits;
		struct qxl_copy blend;
		struct qxl_rop_3 rop3;
		struct qxl_stroke stroke;
		struct qxl_text text;
		struct qxl_mask blackness;
		struct qxl_mask invers;
		struct qxl_mask whiteness;
		struct qxl_composite composite;
	} u;
};
735 | |||
/* Operations for qxl_surface_cmd.type. */
enum qxl_surface_cmd_type {
	QXL_SURFACE_CMD_CREATE,
	QXL_SURFACE_CMD_DESTROY,
};

/* Off-screen surface description. */
struct qxl_surface {
	uint32_t format;		/* SpiceSurfaceFmt */
	uint32_t width;
	uint32_t height;
	int32_t stride;
	QXLPHYSICAL data;
};

/* Body of a QXL_CMD_SURFACE command. */
struct qxl_surface_cmd {
	union qxl_release_info release_info;
	uint32_t surface_id;
	uint8_t type;			/* enum qxl_surface_cmd_type */
	uint32_t flags;
	union {
		struct qxl_surface surface_create;
	} u;
};

/* Rectangle list referenced by qxl_clip.data for
 * SPICE_CLIP_TYPE_RECTS; rects are carried in the chunk data. */
struct qxl_clip_rects {
	uint32_t num_rects;
	struct qxl_data_chunk chunk;
};

/* Flags of a path segment. */
enum {
	QXL_PATH_BEGIN = (1 << 0),
	QXL_PATH_END = (1 << 1),
	QXL_PATH_CLOSE = (1 << 3),
	QXL_PATH_BEZIER = (1 << 4),
};

/* One segment of a stroke path: count fixed-point points. */
struct qxl_path_seg {
	uint32_t flags;			/* QXL_PATH_* */
	uint32_t count;
	struct qxl_point_fix points[0];
};

/* Path referenced by qxl_stroke.path; segments live in the chunk. */
struct qxl_path {
	uint32_t data_size;
	struct qxl_data_chunk chunk;
};
781 | |||
/* Owner namespaces for image ids (high/low halves of qxl_image_id). */
enum {
	QXL_IMAGE_GROUP_DRIVER,
	QXL_IMAGE_GROUP_DEVICE,
	QXL_IMAGE_GROUP_RED,
	QXL_IMAGE_GROUP_DRIVER_DONT_CACHE,
};

/* Image identity: owning group plus a unique value within it. */
struct qxl_image_id {
	uint32_t group;
	uint32_t unique;
};

/* Same identity viewed as the packed 64-bit descriptor id. */
union qxl_image_id_union {
	struct qxl_image_id id;
	uint64_t value;
};

/* Flags for qxl_image_descriptor.flags. */
enum qxl_image_flags {
	QXL_IMAGE_CACHE = (1 << 0),
	QXL_IMAGE_HIGH_BITS_SET = (1 << 1),
};

/* Flags for qxl_bitmap.flags. */
enum qxl_bitmap_flags {
	QXL_BITMAP_DIRECT = (1 << 0),
	QXL_BITMAP_UNSTABLE = (1 << 1),
	QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */
};
809 | |||
/*
 * Compose the 64-bit image id: _unique in the high 32 bits, _group in
 * the low 32 bits (matches struct qxl_image_id / qxl_image_id_union).
 * Arguments are parenthesized and the body wrapped in do/while(0) so
 * the macro is safe with expression arguments and inside unbraced
 * if/else statements.
 */
#define QXL_SET_IMAGE_ID(image, _group, _unique) do {			\
	(image)->descriptor.id =					\
		(((uint64_t)(_unique)) << 32) | (_group);		\
} while (0)
813 | |||
/* Common header of every image. */
struct qxl_image_descriptor {
	uint64_t id;			/* see QXL_SET_IMAGE_ID */
	uint8_t type;			/* SpiceImageType */
	uint8_t flags;			/* enum qxl_image_flags */
	uint32_t width;
	uint32_t height;
};

/* Color palette for indexed bitmap formats. */
struct qxl_palette {
	uint64_t unique;
	uint16_t num_ents;
	uint32_t ents[0];		/* num_ents entries follow */
};

/* Raw bitmap image payload. */
struct qxl_bitmap {
	uint8_t format;			/* SpiceBitmapFmt */
	uint8_t flags;			/* enum qxl_bitmap_flags */
	uint32_t x;			/* width in pixels */
	uint32_t y;			/* height in pixels */
	uint32_t stride;		/* bytes per scanline */
	QXLPHYSICAL palette;
	QXLPHYSICAL data; /* data[0] ? */
};

/* Payload of SPICE_IMAGE_TYPE_SURFACE: reference another surface. */
struct qxl_surface_id {
	uint32_t surface_id;
};

/* Opaque encoder output (e.g. QUIC-compressed data). */
struct qxl_encoder_data {
	uint32_t data_size;
	uint8_t data[0];
};

/* Image: descriptor plus a type-dependent, variable-length payload. */
struct qxl_image {
	struct qxl_image_descriptor descriptor;
	union { /* variable length */
		struct qxl_bitmap bitmap;
		struct qxl_encoder_data quic;
		struct qxl_surface_id surface_image;
	} u;
};
855 | |||
/* A QXLHead is a single monitor output backed by a QXLSurface.
 * x and y offsets are unsigned since they are used in relation to
 * the given surface, not the same as the x, y coordinates in the guest
 * screen reference frame. */
struct qxl_head {
	uint32_t id;
	uint32_t surface_id;
	uint32_t width;
	uint32_t height;
	uint32_t x;
	uint32_t y;
	uint32_t flags;
};

/* Monitor layout handed to the device via
 * ram_header.monitors_config + QXL_IO_MONITORS_CONFIG_ASYNC. */
struct qxl_monitors_config {
	uint16_t count;
	uint16_t max_allowed; /* If it is 0 no fixed limit is given by the
				 driver */
	struct qxl_head heads[0];	/* count entries follow */
};
876 | |||
877 | #pragma pack(pop) | ||
878 | |||
#endif /* H_QXL_DEV */
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c new file mode 100644 index 000000000000..c80ddfedbbab --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_display.c | |||
@@ -0,0 +1,981 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | |||
27 | #include "linux/crc32.h" | ||
28 | |||
29 | #include "qxl_drv.h" | ||
30 | #include "qxl_object.h" | ||
31 | #include "drm_crtc_helper.h" | ||
32 | |||
33 | static void qxl_crtc_set_to_mode(struct qxl_device *qdev, | ||
34 | struct drm_connector *connector, | ||
35 | struct qxl_head *head) | ||
36 | { | ||
37 | struct drm_device *dev = connector->dev; | ||
38 | struct drm_display_mode *mode, *t; | ||
39 | int width = head->width; | ||
40 | int height = head->height; | ||
41 | |||
42 | if (width < 320 || height < 240) { | ||
43 | qxl_io_log(qdev, "%s: bad head: %dx%d", width, height); | ||
44 | width = 1024; | ||
45 | height = 768; | ||
46 | } | ||
47 | if (width * height * 4 > 16*1024*1024) { | ||
48 | width = 1024; | ||
49 | height = 768; | ||
50 | } | ||
51 | /* TODO: go over regular modes and removed preferred? */ | ||
52 | list_for_each_entry_safe(mode, t, &connector->probed_modes, head) | ||
53 | drm_mode_remove(connector, mode); | ||
54 | mode = drm_cvt_mode(dev, width, height, 60, false, false, false); | ||
55 | mode->type |= DRM_MODE_TYPE_PREFERRED; | ||
56 | mode->status = MODE_OK; | ||
57 | drm_mode_probed_add(connector, mode); | ||
58 | qxl_io_log(qdev, "%s: %d x %d\n", __func__, width, height); | ||
59 | } | ||
60 | |||
61 | void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev) | ||
62 | { | ||
63 | struct drm_connector *connector; | ||
64 | int i; | ||
65 | struct drm_device *dev = qdev->ddev; | ||
66 | |||
67 | i = 0; | ||
68 | qxl_io_log(qdev, "%s: %d, %d\n", __func__, | ||
69 | dev->mode_config.num_connector, | ||
70 | qdev->monitors_config->count); | ||
71 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
72 | if (i > qdev->monitors_config->count) { | ||
73 | /* crtc will be reported as disabled */ | ||
74 | continue; | ||
75 | } | ||
76 | qxl_crtc_set_to_mode(qdev, connector, | ||
77 | &qdev->monitors_config->heads[i]); | ||
78 | ++i; | ||
79 | } | ||
80 | } | ||
81 | |||
82 | void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count) | ||
83 | { | ||
84 | if (qdev->client_monitors_config && | ||
85 | count > qdev->client_monitors_config->count) { | ||
86 | kfree(qdev->client_monitors_config); | ||
87 | } | ||
88 | if (!qdev->client_monitors_config) { | ||
89 | qdev->client_monitors_config = kzalloc( | ||
90 | sizeof(struct qxl_monitors_config) + | ||
91 | sizeof(struct qxl_head) * count, GFP_KERNEL); | ||
92 | if (!qdev->client_monitors_config) { | ||
93 | qxl_io_log(qdev, | ||
94 | "%s: allocation failure for %u heads\n", | ||
95 | __func__, count); | ||
96 | return; | ||
97 | } | ||
98 | } | ||
99 | qdev->client_monitors_config->count = count; | ||
100 | } | ||
101 | |||
102 | static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev) | ||
103 | { | ||
104 | int i; | ||
105 | int num_monitors; | ||
106 | uint32_t crc; | ||
107 | |||
108 | BUG_ON(!qdev->monitors_config); | ||
109 | num_monitors = qdev->rom->client_monitors_config.count; | ||
110 | crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config, | ||
111 | sizeof(qdev->rom->client_monitors_config)); | ||
112 | if (crc != qdev->rom->client_monitors_config_crc) { | ||
113 | qxl_io_log(qdev, "crc mismatch: have %X (%d) != %X\n", crc, | ||
114 | sizeof(qdev->rom->client_monitors_config), | ||
115 | qdev->rom->client_monitors_config_crc); | ||
116 | return 1; | ||
117 | } | ||
118 | if (num_monitors > qdev->monitors_config->max_allowed) { | ||
119 | DRM_INFO("client monitors list will be truncated: %d < %d\n", | ||
120 | qdev->monitors_config->max_allowed, num_monitors); | ||
121 | num_monitors = qdev->monitors_config->max_allowed; | ||
122 | } else { | ||
123 | num_monitors = qdev->rom->client_monitors_config.count; | ||
124 | } | ||
125 | qxl_alloc_client_monitors_config(qdev, num_monitors); | ||
126 | /* we copy max from the client but it isn't used */ | ||
127 | qdev->client_monitors_config->max_allowed = | ||
128 | qdev->monitors_config->max_allowed; | ||
129 | for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) { | ||
130 | struct qxl_urect *c_rect = | ||
131 | &qdev->rom->client_monitors_config.heads[i]; | ||
132 | struct qxl_head *client_head = | ||
133 | &qdev->client_monitors_config->heads[i]; | ||
134 | struct qxl_head *head = &qdev->monitors_config->heads[i]; | ||
135 | client_head->x = head->x = c_rect->left; | ||
136 | client_head->y = head->y = c_rect->top; | ||
137 | client_head->width = head->width = | ||
138 | c_rect->right - c_rect->left; | ||
139 | client_head->height = head->height = | ||
140 | c_rect->bottom - c_rect->top; | ||
141 | client_head->surface_id = head->surface_id = 0; | ||
142 | client_head->id = head->id = i; | ||
143 | client_head->flags = head->flags = 0; | ||
144 | QXL_DEBUG(qdev, "read %dx%d+%d+%d\n", head->width, head->height, | ||
145 | head->x, head->y); | ||
146 | } | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | void qxl_display_read_client_monitors_config(struct qxl_device *qdev) | ||
151 | { | ||
152 | |||
153 | while (qxl_display_copy_rom_client_monitors_config(qdev)) { | ||
154 | qxl_io_log(qdev, "failed crc check for client_monitors_config," | ||
155 | " retrying\n"); | ||
156 | } | ||
157 | qxl_crtc_set_from_monitors_config(qdev); | ||
158 | /* fire off a uevent and let userspace tell us what to do */ | ||
159 | qxl_io_log(qdev, "calling drm_sysfs_hotplug_event\n"); | ||
160 | drm_sysfs_hotplug_event(qdev->ddev); | ||
161 | } | ||
162 | |||
163 | static int qxl_add_monitors_config_modes(struct drm_connector *connector) | ||
164 | { | ||
165 | struct drm_device *dev = connector->dev; | ||
166 | struct qxl_device *qdev = dev->dev_private; | ||
167 | struct qxl_output *output = drm_connector_to_qxl_output(connector); | ||
168 | int h = output->index; | ||
169 | struct drm_display_mode *mode = NULL; | ||
170 | struct qxl_head *head; | ||
171 | |||
172 | if (!qdev->monitors_config) | ||
173 | return 0; | ||
174 | head = &qdev->monitors_config->heads[h]; | ||
175 | |||
176 | mode = drm_cvt_mode(dev, head->width, head->height, 60, false, false, | ||
177 | false); | ||
178 | mode->type |= DRM_MODE_TYPE_PREFERRED; | ||
179 | drm_mode_probed_add(connector, mode); | ||
180 | return 1; | ||
181 | } | ||
182 | |||
183 | static int qxl_add_common_modes(struct drm_connector *connector) | ||
184 | { | ||
185 | struct drm_device *dev = connector->dev; | ||
186 | struct drm_display_mode *mode = NULL; | ||
187 | int i; | ||
188 | struct mode_size { | ||
189 | int w; | ||
190 | int h; | ||
191 | } common_modes[] = { | ||
192 | { 640, 480}, | ||
193 | { 720, 480}, | ||
194 | { 800, 600}, | ||
195 | { 848, 480}, | ||
196 | {1024, 768}, | ||
197 | {1152, 768}, | ||
198 | {1280, 720}, | ||
199 | {1280, 800}, | ||
200 | {1280, 854}, | ||
201 | {1280, 960}, | ||
202 | {1280, 1024}, | ||
203 | {1440, 900}, | ||
204 | {1400, 1050}, | ||
205 | {1680, 1050}, | ||
206 | {1600, 1200}, | ||
207 | {1920, 1080}, | ||
208 | {1920, 1200} | ||
209 | }; | ||
210 | |||
211 | for (i = 0; i < ARRAY_SIZE(common_modes); i++) { | ||
212 | if (common_modes[i].w < 320 || common_modes[i].h < 200) | ||
213 | continue; | ||
214 | |||
215 | mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, | ||
216 | 60, false, false, false); | ||
217 | if (common_modes[i].w == 1024 && common_modes[i].h == 768) | ||
218 | mode->type |= DRM_MODE_TYPE_PREFERRED; | ||
219 | drm_mode_probed_add(connector, mode); | ||
220 | } | ||
221 | return i - 1; | ||
222 | } | ||
223 | |||
/* drm_crtc_funcs.gamma_set: intentionally a stub — no LUT programming
 * is implemented for the virtual device yet. */
static void qxl_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
			       u16 *blue, uint32_t start, uint32_t size)
{
	/* TODO */
}
229 | |||
/* drm_crtc_funcs.destroy: unregister the crtc from DRM core and free
 * the containing qxl_crtc allocated in qdev_crtc_init(). */
static void qxl_crtc_destroy(struct drm_crtc *crtc)
{
	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(qcrtc);
}
237 | |||
238 | static void | ||
239 | qxl_hide_cursor(struct qxl_device *qdev) | ||
240 | { | ||
241 | struct qxl_release *release; | ||
242 | struct qxl_cursor_cmd *cmd; | ||
243 | int ret; | ||
244 | |||
245 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, | ||
246 | &release, NULL); | ||
247 | |||
248 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); | ||
249 | cmd->type = QXL_CURSOR_HIDE; | ||
250 | qxl_release_unmap(qdev, release, &cmd->release_info); | ||
251 | |||
252 | qxl_fence_releaseable(qdev, release); | ||
253 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | ||
254 | qxl_release_unreserve(qdev, release); | ||
255 | } | ||
256 | |||
/* drm_crtc_funcs.cursor_set: upload a new cursor image and issue
 * QXL_CURSOR_SET, or hide the cursor when @handle is 0.
 *
 * The userspace bo is reserved/pinned/kmapped just long enough to copy
 * its pixels into a freshly allocated device-visible cursor bo.
 * Error paths unwind via the goto chain in reverse acquisition order.
 *
 * NOTE(review): 'width'/'height' are ignored — the cursor is always
 * treated as 64x64 ALPHA (size = 64*64*4 bytes below); confirm callers
 * never pass a larger image, since the memcpy reads 'size' bytes from
 * the user bo unconditionally. */
static int qxl_crtc_cursor_set(struct drm_crtc *crtc,
			       struct drm_file *file_priv,
			       uint32_t handle,
			       uint32_t width,
			       uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct qxl_device *qdev = dev->dev_private;
	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
	struct drm_gem_object *obj;
	struct qxl_cursor *cursor;
	struct qxl_cursor_cmd *cmd;
	struct qxl_bo *cursor_bo, *user_bo;
	struct qxl_release *release;
	void *user_ptr;

	int size = 64*64*4;	/* fixed 64x64 ARGB cursor payload */
	int ret = 0;
	if (!handle) {
		/* handle 0 is the "remove cursor" convention */
		qxl_hide_cursor(qdev);
		return 0;
	}

	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("cannot find cursor object\n");
		return -ENOENT;
	}

	user_bo = gem_to_qxl_bo(obj);

	/* map the userspace image so its pixels can be copied out */
	ret = qxl_bo_reserve(user_bo, false);
	if (ret)
		goto out_unref;

	ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
	if (ret)
		goto out_unreserve;

	ret = qxl_bo_kmap(user_bo, &user_ptr);
	if (ret)
		goto out_unpin;

	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
					 QXL_RELEASE_CURSOR_CMD,
					 &release, NULL);
	if (ret)
		goto out_kunmap;
	ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
				    &cursor_bo);
	if (ret)
		goto out_free_release;
	ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
	if (ret)
		goto out_free_bo;

	/* fill in the device-side cursor header + single data chunk */
	cursor->header.unique = 0;
	cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
	cursor->header.width = 64;
	cursor->header.height = 64;
	cursor->header.hot_spot_x = 0;
	cursor->header.hot_spot_y = 0;
	cursor->data_size = size;
	cursor->chunk.next_chunk = 0;
	cursor->chunk.prev_chunk = 0;
	cursor->chunk.data_size = size;

	memcpy(cursor->chunk.data, user_ptr, size);

	qxl_bo_kunmap(cursor_bo);

	/* finish with the userspace bo */
	qxl_bo_kunmap(user_bo);
	qxl_bo_unpin(user_bo);
	qxl_bo_unreserve(user_bo);
	drm_gem_object_unreference_unlocked(obj);

	/* build and push the SET command; position comes from the last
	 * cursor_move so the cursor does not jump on image change */
	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_CURSOR_SET;
	cmd->u.set.position.x = qcrtc->cur_x;
	cmd->u.set.position.y = qcrtc->cur_y;

	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
	qxl_release_add_res(qdev, release, cursor_bo);

	cmd->u.set.visible = 1;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_fence_releaseable(qdev, release);
	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
	qxl_release_unreserve(qdev, release);

	/* the release now holds its own reference to cursor_bo */
	qxl_bo_unreserve(cursor_bo);
	qxl_bo_unref(&cursor_bo);

	return ret;
out_free_bo:
	qxl_bo_unref(&cursor_bo);
out_free_release:
	qxl_release_unreserve(qdev, release);
	qxl_release_free(qdev, release);
out_kunmap:
	qxl_bo_kunmap(user_bo);
out_unpin:
	qxl_bo_unpin(user_bo);
out_unreserve:
	qxl_bo_unreserve(user_bo);
out_unref:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
368 | |||
369 | static int qxl_crtc_cursor_move(struct drm_crtc *crtc, | ||
370 | int x, int y) | ||
371 | { | ||
372 | struct drm_device *dev = crtc->dev; | ||
373 | struct qxl_device *qdev = dev->dev_private; | ||
374 | struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); | ||
375 | struct qxl_release *release; | ||
376 | struct qxl_cursor_cmd *cmd; | ||
377 | int ret; | ||
378 | |||
379 | ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, | ||
380 | &release, NULL); | ||
381 | |||
382 | qcrtc->cur_x = x; | ||
383 | qcrtc->cur_y = y; | ||
384 | |||
385 | cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); | ||
386 | cmd->type = QXL_CURSOR_MOVE; | ||
387 | cmd->u.position.x = qcrtc->cur_x; | ||
388 | cmd->u.position.y = qcrtc->cur_y; | ||
389 | qxl_release_unmap(qdev, release, &cmd->release_info); | ||
390 | |||
391 | qxl_fence_releaseable(qdev, release); | ||
392 | qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); | ||
393 | qxl_release_unreserve(qdev, release); | ||
394 | return 0; | ||
395 | } | ||
396 | |||
397 | |||
/* CRTC vtable: cursor handling is implemented natively; the actual
 * modeset plumbing is delegated to the DRM CRTC helper library. */
static const struct drm_crtc_funcs qxl_crtc_funcs = {
	.cursor_set = qxl_crtc_cursor_set,
	.cursor_move = qxl_crtc_cursor_move,
	.gamma_set = qxl_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = qxl_crtc_destroy,
};
405 | |||
406 | static void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) | ||
407 | { | ||
408 | struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); | ||
409 | |||
410 | if (qxl_fb->obj) | ||
411 | drm_gem_object_unreference_unlocked(qxl_fb->obj); | ||
412 | drm_framebuffer_cleanup(fb); | ||
413 | kfree(qxl_fb); | ||
414 | } | ||
415 | |||
416 | int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, | ||
417 | struct drm_file *file_priv, | ||
418 | unsigned flags, unsigned color, | ||
419 | struct drm_clip_rect *clips, | ||
420 | unsigned num_clips) | ||
421 | { | ||
422 | /* TODO: vmwgfx where this was cribbed from had locking. Why? */ | ||
423 | struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); | ||
424 | struct qxl_device *qdev = qxl_fb->base.dev->dev_private; | ||
425 | struct drm_clip_rect norect; | ||
426 | struct qxl_bo *qobj; | ||
427 | int inc = 1; | ||
428 | |||
429 | qobj = gem_to_qxl_bo(qxl_fb->obj); | ||
430 | if (qxl_fb != qdev->active_user_framebuffer) { | ||
431 | DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n", | ||
432 | __func__, qxl_fb, qdev->active_user_framebuffer); | ||
433 | } | ||
434 | if (!num_clips) { | ||
435 | num_clips = 1; | ||
436 | clips = &norect; | ||
437 | norect.x1 = norect.y1 = 0; | ||
438 | norect.x2 = fb->width; | ||
439 | norect.y2 = fb->height; | ||
440 | } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { | ||
441 | num_clips /= 2; | ||
442 | inc = 2; /* skip source rects */ | ||
443 | } | ||
444 | |||
445 | qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color, | ||
446 | clips, num_clips, inc); | ||
447 | return 0; | ||
448 | } | ||
449 | |||
/* Framebuffer vtable for user-created fbs. */
static const struct drm_framebuffer_funcs qxl_fb_funcs = {
	.destroy = qxl_user_framebuffer_destroy,
	.dirty = qxl_framebuffer_surface_dirty,
	/* TODO?
	 * .create_handle = qxl_user_framebuffer_create_handle, */
};
456 | |||
457 | int | ||
458 | qxl_framebuffer_init(struct drm_device *dev, | ||
459 | struct qxl_framebuffer *qfb, | ||
460 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
461 | struct drm_gem_object *obj) | ||
462 | { | ||
463 | int ret; | ||
464 | |||
465 | qfb->obj = obj; | ||
466 | ret = drm_framebuffer_init(dev, &qfb->base, &qxl_fb_funcs); | ||
467 | if (ret) { | ||
468 | qfb->obj = NULL; | ||
469 | return ret; | ||
470 | } | ||
471 | drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd); | ||
472 | return 0; | ||
473 | } | ||
474 | |||
/* drm_crtc_helper_funcs.dpms: intentionally empty — the virtual device
 * has no power states to program. */
static void qxl_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
478 | |||
479 | static bool qxl_crtc_mode_fixup(struct drm_crtc *crtc, | ||
480 | const struct drm_display_mode *mode, | ||
481 | struct drm_display_mode *adjusted_mode) | ||
482 | { | ||
483 | struct drm_device *dev = crtc->dev; | ||
484 | struct qxl_device *qdev = dev->dev_private; | ||
485 | |||
486 | qxl_io_log(qdev, "%s: (%d,%d) => (%d,%d)\n", | ||
487 | __func__, | ||
488 | mode->hdisplay, mode->vdisplay, | ||
489 | adjusted_mode->hdisplay, | ||
490 | adjusted_mode->vdisplay); | ||
491 | return true; | ||
492 | } | ||
493 | |||
494 | void | ||
495 | qxl_send_monitors_config(struct qxl_device *qdev) | ||
496 | { | ||
497 | int i; | ||
498 | |||
499 | BUG_ON(!qdev->ram_header->monitors_config); | ||
500 | |||
501 | if (qdev->monitors_config->count == 0) { | ||
502 | qxl_io_log(qdev, "%s: 0 monitors??\n", __func__); | ||
503 | return; | ||
504 | } | ||
505 | for (i = 0 ; i < qdev->monitors_config->count ; ++i) { | ||
506 | struct qxl_head *head = &qdev->monitors_config->heads[i]; | ||
507 | |||
508 | if (head->y > 8192 || head->y < head->x || | ||
509 | head->width > 8192 || head->height > 8192) { | ||
510 | DRM_ERROR("head %d wrong: %dx%d+%d+%d\n", | ||
511 | i, head->width, head->height, | ||
512 | head->x, head->y); | ||
513 | return; | ||
514 | } | ||
515 | } | ||
516 | qxl_io_monitors_config(qdev); | ||
517 | } | ||
518 | |||
519 | static void qxl_monitors_config_set_single(struct qxl_device *qdev, | ||
520 | unsigned x, unsigned y, | ||
521 | unsigned width, unsigned height) | ||
522 | { | ||
523 | DRM_DEBUG("%dx%d+%d+%d\n", width, height, x, y); | ||
524 | qdev->monitors_config->count = 1; | ||
525 | qdev->monitors_config->heads[0].x = x; | ||
526 | qdev->monitors_config->heads[0].y = y; | ||
527 | qdev->monitors_config->heads[0].width = width; | ||
528 | qdev->monitors_config->heads[0].height = height; | ||
529 | } | ||
530 | |||
/* drm_crtc_helper_funcs.mode_set: pin the new framebuffer's bo, destroy
 * and recreate the device's primary surface at the new size, then unpin
 * the previous bo.  Statement order matters: the new bo must be pinned
 * before the primary is destroyed, and the old bo is only released
 * after the new primary exists. */
static int qxl_crtc_mode_set(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode,
			     int x, int y,
			     struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct qxl_device *qdev = dev->dev_private;
	/* mode->private carries the matching struct qxl_mode for modes that
	 * originated from the device mode table; NULL for CVT modes */
	struct qxl_mode *m = (void *)mode->private;
	struct qxl_framebuffer *qfb;
	struct qxl_bo *bo, *old_bo = NULL;
	uint32_t width, height, base_offset;
	bool recreate_primary = false;
	int ret;

	if (!crtc->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (old_fb) {
		qfb = to_qxl_framebuffer(old_fb);
		old_bo = gem_to_qxl_bo(qfb->obj);
	}
	qfb = to_qxl_framebuffer(crtc->fb);
	bo = gem_to_qxl_bo(qfb->obj);
	if (!m)
		/* and do we care? */
		DRM_DEBUG("%dx%d: not a native mode\n", x, y);
	else
		DRM_DEBUG("%dx%d: qxl id %d\n",
			  mode->hdisplay, mode->vdisplay, m->id);
	DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
		  x, y,
		  mode->hdisplay, mode->vdisplay,
		  adjusted_mode->hdisplay,
		  adjusted_mode->vdisplay);

	/* the primary is unconditionally rebuilt on every mode set */
	recreate_primary = true;

	width = mode->hdisplay;
	height = mode->vdisplay;
	base_offset = 0;

	/* pin the new fb bo so the device can use it as the primary */
	ret = qxl_bo_reserve(bo, false);
	if (ret != 0)
		return ret;
	ret = qxl_bo_pin(bo, bo->type, NULL);
	if (ret != 0) {
		qxl_bo_unreserve(bo);
		return -EINVAL;
	}
	qxl_bo_unreserve(bo);
	if (recreate_primary) {
		qxl_io_destroy_primary(qdev);
		qxl_io_log(qdev,
			   "recreate primary: %dx%d (was %dx%d,%d,%d)\n",
			   width, height, bo->surf.width,
			   bo->surf.height, bo->surf.stride, bo->surf.format);
		qxl_io_create_primary(qdev, width, height, base_offset, bo);
		bo->is_primary = true;
	}

	/* the previous primary (if different) can now be unpinned.
	 * NOTE(review): the qxl_bo_reserve() return value is ignored here —
	 * confirm unpin/unreserve are safe if the reserve failed. */
	if (old_bo && old_bo != bo) {
		old_bo->is_primary = false;
		ret = qxl_bo_reserve(old_bo, false);
		qxl_bo_unpin(old_bo);
		qxl_bo_unreserve(old_bo);
	}

	/* seed a single-head layout if the host never provided one */
	if (qdev->monitors_config->count == 0) {
		qxl_monitors_config_set_single(qdev, x, y,
					       mode->hdisplay,
					       mode->vdisplay);
	}
	qdev->mode_set = true;
	return 0;
}
609 | |||
/* drm_crtc_helper_funcs.prepare: no disable sequence is needed before a
 * mode set on this device; only trace the current crtc state. */
static void qxl_crtc_prepare(struct drm_crtc *crtc)
{
	DRM_DEBUG("current: %dx%d+%d+%d (%d).\n",
		  crtc->mode.hdisplay, crtc->mode.vdisplay,
		  crtc->x, crtc->y, crtc->enabled);
}
616 | |||
/* drm_crtc_helper_funcs.commit: nothing to re-enable after a mode set. */
static void qxl_crtc_commit(struct drm_crtc *crtc)
{
	DRM_DEBUG("\n");
}
621 | |||
/* drm_crtc_helper_funcs.load_lut: stub — no hardware LUT to reload. */
void qxl_crtc_load_lut(struct drm_crtc *crtc)
{
	DRM_DEBUG("\n");
}
626 | |||
/* CRTC helper vtable; only mode_set does real work, the rest are
 * stubs/tracing required by the helper contract. */
static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
	.dpms = qxl_crtc_dpms,
	.mode_fixup = qxl_crtc_mode_fixup,
	.mode_set = qxl_crtc_mode_set,
	.prepare = qxl_crtc_prepare,
	.commit = qxl_crtc_commit,
	.load_lut = qxl_crtc_load_lut,
};
635 | |||
636 | int qdev_crtc_init(struct drm_device *dev, int num_crtc) | ||
637 | { | ||
638 | struct qxl_crtc *qxl_crtc; | ||
639 | |||
640 | qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL); | ||
641 | if (!qxl_crtc) | ||
642 | return -ENOMEM; | ||
643 | |||
644 | drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs); | ||
645 | |||
646 | drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256); | ||
647 | drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs); | ||
648 | return 0; | ||
649 | } | ||
650 | |||
/* drm_encoder_helper_funcs.dpms: stub — no encoder power states. */
static void qxl_enc_dpms(struct drm_encoder *encoder, int mode)
{
	DRM_DEBUG("\n");
}
655 | |||
/* drm_encoder_helper_funcs.mode_fixup: accept every mode unchanged. */
static bool qxl_enc_mode_fixup(struct drm_encoder *encoder,
			       const struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted_mode)
{
	DRM_DEBUG("\n");
	return true;
}
663 | |||
/* drm_encoder_helper_funcs.prepare: stub — nothing to quiesce. */
static void qxl_enc_prepare(struct drm_encoder *encoder)
{
	DRM_DEBUG("\n");
}
668 | |||
669 | static void qxl_write_monitors_config_for_encoder(struct qxl_device *qdev, | ||
670 | struct drm_encoder *encoder) | ||
671 | { | ||
672 | int i; | ||
673 | struct qxl_head *head; | ||
674 | struct drm_display_mode *mode; | ||
675 | |||
676 | BUG_ON(!encoder); | ||
677 | /* TODO: ugly, do better */ | ||
678 | for (i = 0 ; (encoder->possible_crtcs != (1 << i)) && i < 32; ++i) | ||
679 | ; | ||
680 | if (encoder->possible_crtcs != (1 << i)) { | ||
681 | DRM_ERROR("encoder has wrong possible_crtcs: %x\n", | ||
682 | encoder->possible_crtcs); | ||
683 | return; | ||
684 | } | ||
685 | if (!qdev->monitors_config || | ||
686 | qdev->monitors_config->max_allowed <= i) { | ||
687 | DRM_ERROR( | ||
688 | "head number too large or missing monitors config: %p, %d", | ||
689 | qdev->monitors_config, | ||
690 | qdev->monitors_config ? | ||
691 | qdev->monitors_config->max_allowed : -1); | ||
692 | return; | ||
693 | } | ||
694 | if (!encoder->crtc) { | ||
695 | DRM_ERROR("missing crtc on encoder %p\n", encoder); | ||
696 | return; | ||
697 | } | ||
698 | if (i != 0) | ||
699 | DRM_DEBUG("missing for multiple monitors: no head holes\n"); | ||
700 | head = &qdev->monitors_config->heads[i]; | ||
701 | head->id = i; | ||
702 | head->surface_id = 0; | ||
703 | if (encoder->crtc->enabled) { | ||
704 | mode = &encoder->crtc->mode; | ||
705 | head->width = mode->hdisplay; | ||
706 | head->height = mode->vdisplay; | ||
707 | head->x = encoder->crtc->x; | ||
708 | head->y = encoder->crtc->y; | ||
709 | if (qdev->monitors_config->count < i + 1) | ||
710 | qdev->monitors_config->count = i + 1; | ||
711 | } else { | ||
712 | head->width = 0; | ||
713 | head->height = 0; | ||
714 | head->x = 0; | ||
715 | head->y = 0; | ||
716 | } | ||
717 | DRM_DEBUG("setting head %d to +%d+%d %dx%d\n", | ||
718 | i, head->x, head->y, head->width, head->height); | ||
719 | head->flags = 0; | ||
720 | /* TODO - somewhere else to call this for multiple monitors | ||
721 | * (config_commit?) */ | ||
722 | qxl_send_monitors_config(qdev); | ||
723 | } | ||
724 | |||
/* drm_encoder_helper_funcs.commit: after a mode set, publish the
 * encoder's new crtc geometry to the device's monitors config. */
static void qxl_enc_commit(struct drm_encoder *encoder)
{
	struct qxl_device *qdev = encoder->dev->dev_private;

	qxl_write_monitors_config_for_encoder(qdev, encoder);
	DRM_DEBUG("\n");
}
732 | |||
/* drm_encoder_helper_funcs.mode_set: stub — geometry is pushed from
 * qxl_enc_commit() instead. */
static void qxl_enc_mode_set(struct drm_encoder *encoder,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode)
{
	DRM_DEBUG("\n");
}
739 | |||
740 | static int qxl_conn_get_modes(struct drm_connector *connector) | ||
741 | { | ||
742 | int ret = 0; | ||
743 | struct qxl_device *qdev = connector->dev->dev_private; | ||
744 | |||
745 | DRM_DEBUG_KMS("monitors_config=%p\n", qdev->monitors_config); | ||
746 | /* TODO: what should we do here? only show the configured modes for the | ||
747 | * device, or allow the full list, or both? */ | ||
748 | if (qdev->monitors_config && qdev->monitors_config->count) { | ||
749 | ret = qxl_add_monitors_config_modes(connector); | ||
750 | if (ret < 0) | ||
751 | return ret; | ||
752 | } | ||
753 | ret += qxl_add_common_modes(connector); | ||
754 | return ret; | ||
755 | } | ||
756 | |||
/* drm_connector_helper_funcs.mode_valid: accept every mode — the
 * mode_config min/max limits still apply at a higher level. */
static int qxl_conn_mode_valid(struct drm_connector *connector,
			       struct drm_display_mode *mode)
{
	/* TODO: is this called for user defined modes? (xrandr --add-mode)
	 * TODO: check that the mode fits in the framebuffer */
	DRM_DEBUG("%s: %dx%d status=%d\n", mode->name, mode->hdisplay,
		  mode->vdisplay, mode->status);
	return MODE_OK;
}
766 | |||
767 | struct drm_encoder *qxl_best_encoder(struct drm_connector *connector) | ||
768 | { | ||
769 | struct qxl_output *qxl_output = | ||
770 | drm_connector_to_qxl_output(connector); | ||
771 | |||
772 | DRM_DEBUG("\n"); | ||
773 | return &qxl_output->enc; | ||
774 | } | ||
775 | |||
776 | |||
/* Encoder helper vtable; only commit does real work. */
static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
	.dpms = qxl_enc_dpms,
	.mode_fixup = qxl_enc_mode_fixup,
	.prepare = qxl_enc_prepare,
	.mode_set = qxl_enc_mode_set,
	.commit = qxl_enc_commit,
};
784 | |||
/* Connector helper vtable used by the probe helpers. */
static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
	.get_modes = qxl_conn_get_modes,
	.mode_valid = qxl_conn_mode_valid,
	.best_encoder = qxl_best_encoder,
};
790 | |||
/* drm_connector_funcs.save: stub — no connector state to save. */
static void qxl_conn_save(struct drm_connector *connector)
{
	DRM_DEBUG("\n");
}
795 | |||
/* drm_connector_funcs.restore: stub — no connector state to restore. */
static void qxl_conn_restore(struct drm_connector *connector)
{
	DRM_DEBUG("\n");
}
800 | |||
801 | static enum drm_connector_status qxl_conn_detect( | ||
802 | struct drm_connector *connector, | ||
803 | bool force) | ||
804 | { | ||
805 | struct qxl_output *output = | ||
806 | drm_connector_to_qxl_output(connector); | ||
807 | struct drm_device *ddev = connector->dev; | ||
808 | struct qxl_device *qdev = ddev->dev_private; | ||
809 | int connected; | ||
810 | |||
811 | /* The first monitor is always connected */ | ||
812 | connected = (output->index == 0) || | ||
813 | (qdev->monitors_config && | ||
814 | qdev->monitors_config->count > output->index); | ||
815 | |||
816 | DRM_DEBUG("\n"); | ||
817 | return connected ? connector_status_connected | ||
818 | : connector_status_disconnected; | ||
819 | } | ||
820 | |||
/* drm_connector_funcs.set_property: no connector properties are
 * implemented; silently accept everything. */
static int qxl_conn_set_property(struct drm_connector *connector,
				 struct drm_property *property,
				 uint64_t value)
{
	DRM_DEBUG("\n");
	return 0;
}
828 | |||
/* drm_connector_funcs.destroy: unregister the connector and free the
 * qxl_output container (which also embeds the encoder). */
static void qxl_conn_destroy(struct drm_connector *connector)
{
	struct qxl_output *output = drm_connector_to_qxl_output(connector);

	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
	kfree(output);
}
838 | |||
/* Connector vtable; dpms and fill_modes are delegated to the DRM
 * helper library. */
static const struct drm_connector_funcs qxl_connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.save = qxl_conn_save,
	.restore = qxl_conn_restore,
	.detect = qxl_conn_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = qxl_conn_set_property,
	.destroy = qxl_conn_destroy,
};
848 | |||
/* drm_encoder_funcs.destroy: the encoder memory is owned by the
 * enclosing qxl_output (freed in qxl_conn_destroy), so only
 * unregister it here. */
static void qxl_enc_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs qxl_enc_funcs = {
	.destroy = qxl_enc_destroy,
};
857 | |||
858 | int qdev_output_init(struct drm_device *dev, int num_output) | ||
859 | { | ||
860 | struct qxl_output *qxl_output; | ||
861 | struct drm_connector *connector; | ||
862 | struct drm_encoder *encoder; | ||
863 | |||
864 | qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL); | ||
865 | if (!qxl_output) | ||
866 | return -ENOMEM; | ||
867 | |||
868 | qxl_output->index = num_output; | ||
869 | |||
870 | connector = &qxl_output->base; | ||
871 | encoder = &qxl_output->enc; | ||
872 | drm_connector_init(dev, &qxl_output->base, | ||
873 | &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); | ||
874 | |||
875 | drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs, | ||
876 | DRM_MODE_ENCODER_VIRTUAL); | ||
877 | |||
878 | encoder->possible_crtcs = 1 << num_output; | ||
879 | drm_mode_connector_attach_encoder(&qxl_output->base, | ||
880 | &qxl_output->enc); | ||
881 | drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs); | ||
882 | drm_connector_helper_add(connector, &qxl_connector_helper_funcs); | ||
883 | |||
884 | drm_sysfs_connector_add(connector); | ||
885 | return 0; | ||
886 | } | ||
887 | |||
888 | static struct drm_framebuffer * | ||
889 | qxl_user_framebuffer_create(struct drm_device *dev, | ||
890 | struct drm_file *file_priv, | ||
891 | struct drm_mode_fb_cmd2 *mode_cmd) | ||
892 | { | ||
893 | struct drm_gem_object *obj; | ||
894 | struct qxl_framebuffer *qxl_fb; | ||
895 | struct qxl_device *qdev = dev->dev_private; | ||
896 | int ret; | ||
897 | |||
898 | obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); | ||
899 | |||
900 | qxl_fb = kzalloc(sizeof(*qxl_fb), GFP_KERNEL); | ||
901 | if (qxl_fb == NULL) | ||
902 | return NULL; | ||
903 | |||
904 | ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj); | ||
905 | if (ret) { | ||
906 | kfree(qxl_fb); | ||
907 | drm_gem_object_unreference_unlocked(obj); | ||
908 | return NULL; | ||
909 | } | ||
910 | |||
911 | if (qdev->active_user_framebuffer) { | ||
912 | DRM_INFO("%s: active_user_framebuffer %p -> %p\n", | ||
913 | __func__, | ||
914 | qdev->active_user_framebuffer, qxl_fb); | ||
915 | } | ||
916 | qdev->active_user_framebuffer = qxl_fb; | ||
917 | |||
918 | return &qxl_fb->base; | ||
919 | } | ||
920 | |||
/* Mode-config vtable: only user fb creation is implemented. */
static const struct drm_mode_config_funcs qxl_mode_funcs = {
	.fb_create = qxl_user_framebuffer_create,
};
924 | |||
/* One-time KMS bring-up: allocate the shared monitors_config object in
 * VRAM (publishing its device address in the ram header), register the
 * mode_config limits, create QXL_NUM_OUTPUTS crtc/output pairs and
 * start the fbdev emulation.  Returns 0 or -ENOMEM. */
int qxl_modeset_init(struct qxl_device *qdev)
{
	int i;
	int ret;
	struct drm_gem_object *gobj;
	int max_allowed = QXL_NUM_OUTPUTS;
	int monitors_config_size = sizeof(struct qxl_monitors_config) +
		max_allowed * sizeof(struct qxl_head);

	drm_mode_config_init(qdev->ddev);
	/* monitors_config lives in a VRAM bo so the host can read it */
	ret = qxl_gem_object_create(qdev, monitors_config_size, 0,
				    QXL_GEM_DOMAIN_VRAM,
				    false, false, NULL, &gobj);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret);
		return -ENOMEM;
	}
	qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
	/* NOTE(review): the kmap return value is ignored — kptr may be
	 * stale on failure; confirm kmap cannot fail for this bo. */
	qxl_bo_kmap(qdev->monitors_config_bo, NULL);
	qdev->monitors_config = qdev->monitors_config_bo->kptr;
	/* tell the device where to find the layout */
	qdev->ram_header->monitors_config =
		qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);

	memset(qdev->monitors_config, 0, monitors_config_size);
	qdev->monitors_config->max_allowed = max_allowed;

	qdev->ddev->mode_config.funcs = (void *)&qxl_mode_funcs;

	/* modes will be validated against the framebuffer size */
	qdev->ddev->mode_config.min_width = 320;
	qdev->ddev->mode_config.min_height = 200;
	qdev->ddev->mode_config.max_width = 8192;
	qdev->ddev->mode_config.max_height = 8192;

	qdev->ddev->mode_config.fb_base = qdev->vram_base;
	for (i = 0 ; i < QXL_NUM_OUTPUTS; ++i) {
		qdev_crtc_init(qdev->ddev, i);
		qdev_output_init(qdev->ddev, i);
	}

	qdev->mode_info.mode_config_initialized = true;

	/* primary surface must be created by this point, to allow
	 * issuing command queue commands and having them read by
	 * spice server. */
	qxl_fbdev_init(qdev);
	return 0;
}
973 | |||
974 | void qxl_modeset_fini(struct qxl_device *qdev) | ||
975 | { | ||
976 | qxl_fbdev_fini(qdev); | ||
977 | if (qdev->mode_info.mode_config_initialized) { | ||
978 | drm_mode_config_cleanup(qdev->ddev); | ||
979 | qdev->mode_info.mode_config_initialized = false; | ||
980 | } | ||
981 | } | ||
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c new file mode 100644 index 000000000000..3c8c3dbf9378 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_draw.c | |||
@@ -0,0 +1,390 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * on the rights to use, copy, modify, merge, publish, distribute, sub | ||
8 | * license, and/or sell copies of the Software, and to permit persons to whom | ||
9 | * the Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER | ||
19 | * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
20 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | |||
23 | #include "qxl_drv.h" | ||
24 | #include "qxl_object.h" | ||
25 | |||
/* returns a pointer to the already allocated qxl_rect array inside
 * the qxl_clip_rects. This is *not* the same as the memory allocated
 * on the device, it is offset to qxl_clip_rects.chunk.data.
 *
 * On success *clips_bo holds a reserved, kmapped bo; the caller fills
 * the returned rect array and is responsible for kunmap/unreserve/unref.
 * Returns NULL on allocation or mapping failure.
 *
 * NOTE(review): the 'drawable' and 'release' parameters are unused in
 * this function — confirm whether that is intentional. */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
					      struct qxl_drawable *drawable,
					      unsigned num_clips,
					      struct qxl_bo **clips_bo,
					      struct qxl_release *release)
{
	struct qxl_clip_rects *dev_clips;
	int ret;
	/* one chunk header followed by num_clips rects */
	int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
	ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
	if (ret)
		return NULL;

	ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
	if (ret) {
		qxl_bo_unref(clips_bo);
		return NULL;
	}
	/* single-chunk list: no next/prev chunk links */
	dev_clips->num_rects = num_clips;
	dev_clips->chunk.next_chunk = 0;
	dev_clips->chunk.prev_chunk = 0;
	dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
	return (struct qxl_rect *)dev_clips->chunk.data;
}
53 | |||
/*
 * Allocate a release-tracked qxl_drawable command and initialize the
 * fields common to every drawable type.  On success *release holds a
 * reserved release; the drawable has been mapped, initialized and
 * unmapped again, so the caller maps it once more to fill in the
 * type-specific union members before pushing the command.
 */
static int
make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
	      const struct qxl_rect *rect,
	      struct qxl_release **release)
{
	struct qxl_drawable *drawable;
	int i, ret;

	ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
					 QXL_RELEASE_DRAWABLE, release,
					 NULL);
	if (ret)
		return ret;

	drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
	drawable->type = type;

	drawable->surface_id = surface;		/* Only primary for now */
	drawable->effect = QXL_EFFECT_OPAQUE;
	/* no self-bitmap: the command does not read back its own output */
	drawable->self_bitmap = 0;
	drawable->self_bitmap_area.top = 0;
	drawable->self_bitmap_area.left = 0;
	drawable->self_bitmap_area.bottom = 0;
	drawable->self_bitmap_area.right = 0;
	/* FIXME: add clipping */
	drawable->clip.type = SPICE_CLIP_TYPE_NONE;

	/*
	 * surfaces_dest[i] should apparently be filled out with the
	 * surfaces that we depend on, and surface_rects should be
	 * filled with the rectangles of those surfaces that we
	 * are going to use.
	 */
	for (i = 0; i < 3; ++i)
		drawable->surfaces_dest[i] = -1;

	/* NOTE(review): if rect is NULL the bbox is left uninitialized in
	 * device memory — confirm every caller passes a rect. */
	if (rect)
		drawable->bbox = *rect;

	drawable->mm_time = qdev->rom->mm_clock;
	qxl_release_unmap(qdev, *release, &drawable->release_info);
	return 0;
}
97 | |||
98 | static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, | ||
99 | const struct qxl_fb_image *qxl_fb_image) | ||
100 | { | ||
101 | struct qxl_device *qdev = qxl_fb_image->qdev; | ||
102 | const struct fb_image *fb_image = &qxl_fb_image->fb_image; | ||
103 | uint32_t visual = qxl_fb_image->visual; | ||
104 | const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; | ||
105 | struct qxl_palette *pal; | ||
106 | int ret; | ||
107 | uint32_t fgcolor, bgcolor; | ||
108 | static uint64_t unique; /* we make no attempt to actually set this | ||
109 | * correctly globaly, since that would require | ||
110 | * tracking all of our palettes. */ | ||
111 | |||
112 | ret = qxl_alloc_bo_reserved(qdev, | ||
113 | sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, | ||
114 | palette_bo); | ||
115 | |||
116 | ret = qxl_bo_kmap(*palette_bo, (void **)&pal); | ||
117 | pal->num_ents = 2; | ||
118 | pal->unique = unique++; | ||
119 | if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { | ||
120 | /* NB: this is the only used branch currently. */ | ||
121 | fgcolor = pseudo_palette[fb_image->fg_color]; | ||
122 | bgcolor = pseudo_palette[fb_image->bg_color]; | ||
123 | } else { | ||
124 | fgcolor = fb_image->fg_color; | ||
125 | bgcolor = fb_image->bg_color; | ||
126 | } | ||
127 | pal->ents[0] = bgcolor; | ||
128 | pal->ents[1] = fgcolor; | ||
129 | qxl_bo_kunmap(*palette_bo); | ||
130 | return 0; | ||
131 | } | ||
132 | |||
/*
 * Blit an fbcon image onto the primary surface: wrap the pixel data in
 * a qxl_image, build a QXL_DRAW_COPY drawable referencing it, and push
 * the command to the device ring.  For 1bpp images a two-entry palette
 * bo is attached to the image.
 */
void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
			int stride /* filled in if 0 */)
{
	struct qxl_device *qdev = qxl_fb_image->qdev;
	struct qxl_drawable *drawable;
	struct qxl_rect rect;
	const struct fb_image *fb_image = &qxl_fb_image->fb_image;
	int x = fb_image->dx;
	int y = fb_image->dy;
	int width = fb_image->width;
	int height = fb_image->height;
	const char *src = fb_image->data;
	int depth = fb_image->depth;
	struct qxl_release *release;
	struct qxl_bo *image_bo;
	struct qxl_image *image;
	int ret;

	/* derive a packed stride from depth when the caller passes 0 */
	if (stride == 0)
		stride = depth * width / 8;

	rect.left = x;
	rect.right = x + width;
	rect.top = y;
	rect.bottom = y + height;

	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
	if (ret)
		return;

	ret = qxl_image_create(qdev, release, &image_bo,
			       (const uint8_t *)src, 0, 0,
			       width, height, depth, stride);
	if (ret) {
		/* drop the half-built command on failure */
		qxl_release_unreserve(qdev, release);
		qxl_release_free(qdev, release);
		return;
	}

	if (depth == 1) {
		struct qxl_bo *palette_bo;
		void *ptr;
		/* NOTE(review): the return value of
		 * qxl_palette_create_1bit() is ignored; on failure
		 * palette_bo is used uninitialized — confirm/fix. */
		ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
		qxl_release_add_res(qdev, release, palette_bo);

		/* patch the palette address into the image header */
		ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
		image = ptr;
		image->u.bitmap.palette =
			qxl_bo_physical_address(qdev, palette_bo, 0);
		qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
		qxl_bo_unreserve(palette_bo);
		qxl_bo_unref(&palette_bo);
	}

	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);

	/* copy the whole image, 1:1, into the bbox set by make_drawable */
	drawable->u.copy.src_area.top = 0;
	drawable->u.copy.src_area.bottom = height;
	drawable->u.copy.src_area.left = 0;
	drawable->u.copy.src_area.right = width;

	drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
	drawable->u.copy.scale_mode = 0;
	drawable->u.copy.mask.flags = 0;
	drawable->u.copy.mask.pos.x = 0;
	drawable->u.copy.mask.pos.y = 0;
	drawable->u.copy.mask.bitmap = 0;

	drawable->u.copy.src_bitmap =
		qxl_bo_physical_address(qdev, image_bo, 0);
	qxl_release_unmap(qdev, release, &drawable->release_info);

	qxl_release_add_res(qdev, release, image_bo);
	qxl_bo_unreserve(image_bo);
	qxl_bo_unref(&image_bo);

	qxl_fence_releaseable(qdev, release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
	qxl_release_unreserve(qdev, release);
}
213 | |||
/* push a draw command using the given clipping rectangles as
 * the sources from the shadow framebuffer.
 *
 * Right now implementing with a single draw and a clip list. Clip
 * lists are known to be a problem performance wise, this can be solved
 * by treating them differently in the server.
 */
void qxl_draw_dirty_fb(struct qxl_device *qdev,
		       struct qxl_framebuffer *qxl_fb,
		       struct qxl_bo *bo,
		       unsigned flags, unsigned color,
		       struct drm_clip_rect *clips,
		       unsigned num_clips, int inc)
{
	/*
	 * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
	 * send a fill command instead, much cheaper.
	 *
	 * See include/drm/drm_mode.h
	 */
	struct drm_clip_rect *clips_ptr;
	int i;
	int left, right, top, bottom;
	int width, height;
	struct qxl_drawable *drawable;
	struct qxl_rect drawable_rect;
	struct qxl_rect *rects;
	int stride = qxl_fb->base.pitches[0];
	/* depth is not actually interesting, we don't mask with it */
	int depth = qxl_fb->base.bits_per_pixel;
	uint8_t *surface_base;
	struct qxl_release *release;
	struct qxl_bo *image_bo;
	struct qxl_bo *clips_bo;
	int ret;

	/* bounding box of all clip rects, seeded from the first one */
	left = clips->x1;
	right = clips->x2;
	top = clips->y1;
	bottom = clips->y2;

	/* skip the first clip rect */
	/* NOTE(review): 'i' is signed while num_clips is unsigned —
	 * harmless here but triggers sign-compare warnings; confirm. */
	for (i = 1, clips_ptr = clips + inc;
	     i < num_clips; i++, clips_ptr += inc) {
		left = min_t(int, left, (int)clips_ptr->x1);
		right = max_t(int, right, (int)clips_ptr->x2);
		top = min_t(int, top, (int)clips_ptr->y1);
		bottom = max_t(int, bottom, (int)clips_ptr->y2);
	}

	width = right - left;
	height = bottom - top;
	drawable_rect.left = left;
	drawable_rect.right = right;
	drawable_rect.top = top;
	drawable_rect.bottom = bottom;
	ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
			    &release);
	if (ret)
		return;

	/* copy the dirty bounding box out of the shadow framebuffer bo */
	ret = qxl_bo_kmap(bo, (void **)&surface_base);
	if (ret)
		goto out_unref;

	ret = qxl_image_create(qdev, release, &image_bo, surface_base,
			       left, top, width, height, depth, stride);
	qxl_bo_kunmap(bo);
	if (ret)
		goto out_unref;

	/* clips_bo comes back reserved and kmapped; 'rects' points into
	 * that mapping and is filled in below before kunmap.
	 * NOTE(review): 'drawable' is passed uninitialized here — harmless
	 * since drawable_set_clipping ignores it, but worth cleaning up. */
	rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
	if (!rects) {
		qxl_bo_unref(&image_bo);
		goto out_unref;
	}
	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);

	drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
	drawable->clip.data = qxl_bo_physical_address(qdev,
						      clips_bo, 0);
	qxl_release_add_res(qdev, release, clips_bo);

	drawable->u.copy.src_area.top = 0;
	drawable->u.copy.src_area.bottom = height;
	drawable->u.copy.src_area.left = 0;
	drawable->u.copy.src_area.right = width;

	drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
	drawable->u.copy.scale_mode = 0;
	drawable->u.copy.mask.flags = 0;
	drawable->u.copy.mask.pos.x = 0;
	drawable->u.copy.mask.pos.y = 0;
	drawable->u.copy.mask.bitmap = 0;

	drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
	qxl_release_unmap(qdev, release, &drawable->release_info);
	qxl_release_add_res(qdev, release, image_bo);
	qxl_bo_unreserve(image_bo);
	qxl_bo_unref(&image_bo);
	/* copy every clip rect (stepping by inc) into the device list */
	clips_ptr = clips;
	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
		rects[i].left = clips_ptr->x1;
		rects[i].right = clips_ptr->x2;
		rects[i].top = clips_ptr->y1;
		rects[i].bottom = clips_ptr->y2;
	}
	qxl_bo_kunmap(clips_bo);
	qxl_bo_unreserve(clips_bo);
	qxl_bo_unref(&clips_bo);

	qxl_fence_releaseable(qdev, release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
	qxl_release_unreserve(qdev, release);
	return;

out_unref:
	qxl_release_unreserve(qdev, release);
	qxl_release_free(qdev, release);
}
334 | |||
335 | void qxl_draw_copyarea(struct qxl_device *qdev, | ||
336 | u32 width, u32 height, | ||
337 | u32 sx, u32 sy, | ||
338 | u32 dx, u32 dy) | ||
339 | { | ||
340 | struct qxl_drawable *drawable; | ||
341 | struct qxl_rect rect; | ||
342 | struct qxl_release *release; | ||
343 | int ret; | ||
344 | |||
345 | rect.left = dx; | ||
346 | rect.top = dy; | ||
347 | rect.right = dx + width; | ||
348 | rect.bottom = dy + height; | ||
349 | ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release); | ||
350 | if (ret) | ||
351 | return; | ||
352 | |||
353 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | ||
354 | drawable->u.copy_bits.src_pos.x = sx; | ||
355 | drawable->u.copy_bits.src_pos.y = sy; | ||
356 | |||
357 | qxl_release_unmap(qdev, release, &drawable->release_info); | ||
358 | qxl_fence_releaseable(qdev, release); | ||
359 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | ||
360 | qxl_release_unreserve(qdev, release); | ||
361 | } | ||
362 | |||
363 | void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) | ||
364 | { | ||
365 | struct qxl_device *qdev = qxl_draw_fill_rec->qdev; | ||
366 | struct qxl_rect rect = qxl_draw_fill_rec->rect; | ||
367 | uint32_t color = qxl_draw_fill_rec->color; | ||
368 | uint16_t rop = qxl_draw_fill_rec->rop; | ||
369 | struct qxl_drawable *drawable; | ||
370 | struct qxl_release *release; | ||
371 | int ret; | ||
372 | |||
373 | ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release); | ||
374 | if (ret) | ||
375 | return; | ||
376 | |||
377 | drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); | ||
378 | drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; | ||
379 | drawable->u.fill.brush.u.color = color; | ||
380 | drawable->u.fill.rop_descriptor = rop; | ||
381 | drawable->u.fill.mask.flags = 0; | ||
382 | drawable->u.fill.mask.pos.x = 0; | ||
383 | drawable->u.fill.mask.pos.y = 0; | ||
384 | drawable->u.fill.mask.bitmap = 0; | ||
385 | |||
386 | qxl_release_unmap(qdev, release, &drawable->release_info); | ||
387 | qxl_fence_releaseable(qdev, release); | ||
388 | qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); | ||
389 | qxl_release_unreserve(qdev, release); | ||
390 | } | ||
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c new file mode 100644 index 000000000000..d337da0a9759 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_drv.c | |||
@@ -0,0 +1,145 @@ | |||
1 | /* vim: set ts=8 sw=8 tw=78 ai noexpandtab */ | ||
2 | /* qxl_drv.c -- QXL driver -*- linux-c -*- | ||
3 | * | ||
4 | * Copyright 2011 Red Hat, Inc. | ||
5 | * All Rights Reserved. | ||
6 | * | ||
7 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
8 | * copy of this software and associated documentation files (the "Software"), | ||
9 | * to deal in the Software without restriction, including without limitation | ||
10 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
11 | * and/or sell copies of the Software, and to permit persons to whom the | ||
12 | * Software is furnished to do so, subject to the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the next | ||
15 | * paragraph) shall be included in all copies or substantial portions of the | ||
16 | * Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
21 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
22 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
23 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
24 | * OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | * Authors: | ||
27 | * Dave Airlie <airlie@redhat.com> | ||
28 | * Alon Levy <alevy@redhat.com> | ||
29 | */ | ||
30 | |||
31 | #include <linux/module.h> | ||
32 | #include <linux/console.h> | ||
33 | |||
34 | #include "drmP.h" | ||
35 | #include "drm/drm.h" | ||
36 | |||
37 | #include "qxl_drv.h" | ||
38 | |||
/* number of entries in qxl_ioctls[] — presumably defined in qxl_ioctl.c;
 * NOTE(review): qxl_drv.h declares 'extern int qxl_max_ioctl' (no 's') —
 * confirm which symbol is actually defined. */
extern int qxl_max_ioctls;
/* Match the Red Hat QXL PCI device (1b36:0100) whether it is exposed
 * as a VGA-class or other-display-class device (class masked with
 * 0xffff00 so the prog-if byte is ignored). */
static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
	{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
	  0xffff00, 0 },
	{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
	  0xffff00, 0 },
	{ 0, 0, 0 },
};
MODULE_DEVICE_TABLE(pci, pciidlist);

/* modeset parameter: -1 = auto (default), 0 = disabled, else enabled */
int qxl_modeset = -1;

MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, qxl_modeset, int, 0400);
54 | static struct drm_driver qxl_driver; | ||
55 | static struct pci_driver qxl_pci_driver; | ||
56 | |||
57 | static int | ||
58 | qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
59 | { | ||
60 | if (pdev->revision < 4) { | ||
61 | DRM_ERROR("qxl too old, doesn't support client_monitors_config," | ||
62 | " use xf86-video-qxl in user mode"); | ||
63 | return -EINVAL; /* TODO: ENODEV ? */ | ||
64 | } | ||
65 | return drm_get_pci_dev(pdev, ent, &qxl_driver); | ||
66 | } | ||
67 | |||
/* PCI remove: unregister and drop the DRM device bound to this pdev. */
static void
qxl_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
75 | |||
static struct pci_driver qxl_pci_driver = {
	 .name = DRIVER_NAME,
	 .id_table = pciidlist,
	 .probe = qxl_pci_probe,
	 .remove = qxl_pci_remove,
};

/* File operations: everything is delegated to the DRM core except
 * mmap, which routes buffer mappings through qxl_mmap. */
static const struct file_operations qxl_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.poll = drm_poll,
	.fasync = drm_fasync,
	.mmap = qxl_mmap,
};
92 | |||
93 | static struct drm_driver qxl_driver = { | ||
94 | .driver_features = DRIVER_GEM | DRIVER_MODESET | | ||
95 | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, | ||
96 | .dev_priv_size = 0, | ||
97 | .load = qxl_driver_load, | ||
98 | .unload = qxl_driver_unload, | ||
99 | |||
100 | .dumb_create = qxl_mode_dumb_create, | ||
101 | .dumb_map_offset = qxl_mode_dumb_mmap, | ||
102 | .dumb_destroy = qxl_mode_dumb_destroy, | ||
103 | #if defined(CONFIG_DEBUG_FS) | ||
104 | .debugfs_init = qxl_debugfs_init, | ||
105 | .debugfs_cleanup = qxl_debugfs_takedown, | ||
106 | #endif | ||
107 | .gem_init_object = qxl_gem_object_init, | ||
108 | .gem_free_object = qxl_gem_object_free, | ||
109 | .gem_open_object = qxl_gem_object_open, | ||
110 | .gem_close_object = qxl_gem_object_close, | ||
111 | .fops = &qxl_fops, | ||
112 | .ioctls = qxl_ioctls, | ||
113 | .irq_handler = qxl_irq_handler, | ||
114 | .name = DRIVER_NAME, | ||
115 | .desc = DRIVER_DESC, | ||
116 | .date = DRIVER_DATE, | ||
117 | .major = 0, | ||
118 | .minor = 1, | ||
119 | .patchlevel = 0, | ||
120 | }; | ||
121 | |||
/* Module init: bail out if modesetting is disabled (or a VGA text
 * console was forced and the user did not override), otherwise
 * register the PCI/DRM driver. */
static int __init qxl_init(void)
{
#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && qxl_modeset == -1)
		return -EINVAL;
#endif

	if (qxl_modeset == 0)
		return -EINVAL;
	/* NOTE(review): uses 'qxl_max_ioctls' while qxl_drv.h declares
	 * 'qxl_max_ioctl' — confirm which symbol qxl_ioctl.c defines. */
	qxl_driver.num_ioctls = qxl_max_ioctls;
	return drm_pci_init(&qxl_driver, &qxl_pci_driver);
}
134 | |||
/* Module unload: unregister the PCI/DRM driver. */
static void __exit qxl_exit(void)
{
	drm_pci_exit(&qxl_driver, &qxl_pci_driver);
}
139 | |||
140 | module_init(qxl_init); | ||
141 | module_exit(qxl_exit); | ||
142 | |||
143 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
144 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
145 | MODULE_LICENSE("GPL and additional rights"); | ||
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h new file mode 100644 index 000000000000..52b582c211da --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_drv.h | |||
@@ -0,0 +1,566 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | |||
27 | #ifndef QXL_DRV_H | ||
28 | #define QXL_DRV_H | ||
29 | |||
30 | /* | ||
31 | * Definitions taken from spice-protocol, plus kernel driver specific bits. | ||
32 | */ | ||
33 | |||
34 | #include <linux/workqueue.h> | ||
35 | #include <linux/firmware.h> | ||
36 | #include <linux/platform_device.h> | ||
37 | |||
38 | #include "drmP.h" | ||
39 | #include "drm_crtc.h" | ||
40 | #include <ttm/ttm_bo_api.h> | ||
41 | #include <ttm/ttm_bo_driver.h> | ||
42 | #include <ttm/ttm_placement.h> | ||
43 | #include <ttm/ttm_module.h> | ||
44 | |||
45 | #include <drm/qxl_drm.h> | ||
46 | #include "qxl_dev.h" | ||
47 | |||
#define DRIVER_AUTHOR "Dave Airlie"

#define DRIVER_NAME "qxl"
#define DRIVER_DESC "RH QXL"
#define DRIVER_DATE "20120117"

#define DRIVER_MAJOR 0
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0

/* number of connector/encoder pairs the driver exposes */
#define QXL_NUM_OUTPUTS 1

#define QXL_DEBUGFS_MAX_COMPONENTS 32

/* runtime verbosity for QXL_INFO/QXL_DEBUG below */
extern int qxl_log_level;

enum {
	QXL_INFO_LEVEL = 1,
	QXL_DEBUG_LEVEL = 2,
};

/* log through the device (qxl_io_log) when verbosity is high enough */
#define QXL_INFO(qdev, fmt, ...) do { \
		if (qxl_log_level >= QXL_INFO_LEVEL) { \
			qxl_io_log(qdev, fmt, __VA_ARGS__); \
		} \
	} while (0)
#define QXL_DEBUG(qdev, fmt, ...) do { \
		if (qxl_log_level >= QXL_DEBUG_LEVEL) { \
			qxl_io_log(qdev, fmt, __VA_ARGS__); \
		} \
	} while (0)
/* like QXL_INFO but fires only once per expansion site (each use of
 * the macro gets its own static 'done' flag) */
#define QXL_INFO_ONCE(qdev, fmt, ...) do { \
		static int done; \
		if (!done) { \
			done = 1; \
			QXL_INFO(qdev, fmt, __VA_ARGS__); \
		} \
	} while (0)

#define DRM_FILE_OFFSET 0x100000000ULL
#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)

/* interrupts the driver handles/enables */
#define QXL_INTERRUPT_MASK (\
	QXL_INTERRUPT_DISPLAY |\
	QXL_INTERRUPT_CURSOR |\
	QXL_INTERRUPT_IO_CMD |\
	QXL_INTERRUPT_CLIENT_MONITORS_CONFIG)
95 | |||
/* Per-bo fence state: tracks the device releases still outstanding
 * against the bo (ids kept in a radix tree). */
struct qxl_fence {
	struct qxl_device *qdev;
	uint32_t num_active_releases;
	uint32_t *release_ids;
	struct radix_tree_root tree;
};

/* Driver buffer object wrapping a TTM bo + GEM base object. */
struct qxl_bo {
	/* Protected by gem.mutex */
	struct list_head list;
	/* Protected by tbo.reserved */
	u32 placements[3];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	unsigned pin_count;
	/* kernel virtual address while kmapped, else stale */
	void *kptr;
	int type;
	/* Constant after initialization */
	struct drm_gem_object gem_base;
	bool is_primary; /* is this now a primary surface */
	bool hw_surf_alloc;
	struct qxl_surface surf;
	uint32_t surface_id;
	struct qxl_fence fence; /* per bo fence - list of releases */
	struct qxl_release *surf_create;
	atomic_t reserve_count;
};
#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)

/* All GEM objects of the device, protected by the embedded mutex. */
struct qxl_gem {
	struct mutex mutex;
	struct list_head objects;
};

/* Node for building ad-hoc lists of bos (e.g. relocations). */
struct qxl_bo_list {
	struct list_head lhead;
	struct qxl_bo *bo;
};

struct qxl_reloc_list {
	struct list_head bos;
};

struct qxl_crtc {
	struct drm_crtc base;
	int cur_x;	/* last cursor position */
	int cur_y;
};

/* One output: a connector and its encoder, allocated together. */
struct qxl_output {
	int index;
	struct drm_connector base;
	struct drm_encoder enc;
};

struct qxl_framebuffer {
	struct drm_framebuffer base;
	struct drm_gem_object *obj;	/* backing GEM object */
};
156 | |||
#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base)
#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base)
/* BUGFIX: the drm_encoder member of struct qxl_output is 'enc', not
 * 'base' (which is the drm_connector); container_of over the wrong
 * member yields a misadjusted pointer when converting an encoder. */
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
#define to_qxl_framebuffer(x) container_of(x, struct qxl_framebuffer, base)
161 | |||
/* TTM memory manager state (global refs + the bo device). */
struct qxl_mman {
	struct ttm_bo_global_ref        bo_global_ref;
	struct drm_global_reference	mem_global_ref;
	bool				mem_global_referenced;
	struct ttm_bo_device		bdev;
};

struct qxl_mode_info {
	int num_modes;
	struct qxl_mode *modes;
	bool mode_config_initialized;

	/* pointer to fbdev info structure */
	struct qxl_fbdev *qfbdev;
};


/* One device memory slot: physical range plus the precomputed high
 * bits (slot id/generation) ORed into device addresses. */
struct qxl_memslot {
	uint8_t		generation;
	uint64_t	start_phys_addr;
	uint64_t	end_phys_addr;
	uint64_t	high_bits;
};

/* release types — also index current_release_bo[] in qxl_device */
enum {
	QXL_RELEASE_DRAWABLE,
	QXL_RELEASE_SURFACE_CMD,
	QXL_RELEASE_CURSOR_CMD,
};

/* drm_ prefix to differentiate from qxl_release_info in
 * spice-protocol/qxl_dev.h */
#define QXL_MAX_RES 96
struct qxl_release {
	int id;
	int type;
	int bo_count;
	uint32_t release_offset;
	uint32_t surface_release_id;
	struct qxl_bo *bos[QXL_MAX_RES];
};

/* fbcon image plus the context needed to draw it (palette, visual). */
struct qxl_fb_image {
	struct qxl_device *qdev;
	uint32_t pseudo_palette[16];
	struct fb_image fb_image;
	uint32_t visual;
};

/* parameters for qxl_draw_fill() */
struct qxl_draw_fill {
	struct qxl_device *qdev;
	struct qxl_rect rect;
	uint32_t color;
	uint16_t rop;
};

/*
 * Debugfs
 */
struct qxl_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};

int qxl_debugfs_add_files(struct qxl_device *rdev,
			  struct drm_info_list *files,
			  unsigned nfiles);
int qxl_debugfs_fence_init(struct qxl_device *rdev);
void qxl_debugfs_remove_files(struct qxl_device *qdev);
231 | |||
232 | struct qxl_device; | ||
233 | |||
/* Main per-device driver state. */
struct qxl_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;
	unsigned long flags;

	/* PCI resources: VRAM, surface memory and the device ROM */
	resource_size_t vram_base, vram_size;
	resource_size_t surfaceram_base, surfaceram_size;
	resource_size_t rom_base, rom_size;
	struct qxl_rom *rom;

	struct qxl_mode *modes;
	struct qxl_bo *monitors_config_bo;
	struct qxl_monitors_config *monitors_config;

	/* last received client_monitors_config */
	struct qxl_monitors_config *client_monitors_config;

	int io_base;
	void *ram;
	struct qxl_mman		mman;
	struct qxl_gem		gem;
	struct qxl_mode_info mode_info;

	/*
	 * last created framebuffer with fb_create
	 * only used by debugfs dumbppm
	 */
	struct qxl_framebuffer *active_user_framebuffer;

	struct fb_info			*fbdev_info;
	struct qxl_framebuffer	*fbdev_qfb;
	void *ram_physical;

	/* command/cursor/release rings shared with the device */
	struct qxl_ring *release_ring;
	struct qxl_ring *command_ring;
	struct qxl_ring *cursor_ring;

	struct qxl_ram_header *ram_header;
	bool mode_set;

	bool primary_created;

	/* memory slots used by qxl_bo_physical_address() to translate
	 * bo offsets into device-visible addresses */
	struct qxl_memslot	*mem_slots;
	uint8_t		n_mem_slots;

	uint8_t		main_mem_slot;
	uint8_t		surfaces_mem_slot;
	uint8_t		slot_id_bits;
	uint8_t		slot_gen_bits;
	uint64_t	va_slot_mask;

	struct idr	release_idr;
	spinlock_t release_idr_lock;
	struct mutex	async_io_mutex;
	unsigned int last_sent_io_cmd;

	/* interrupt handling */
	atomic_t irq_received;
	atomic_t irq_received_display;
	atomic_t irq_received_cursor;
	atomic_t irq_received_io_cmd;
	unsigned irq_received_error;
	wait_queue_head_t display_event;
	wait_queue_head_t cursor_event;
	wait_queue_head_t io_cmd_event;
	struct work_struct client_monitors_config_work;

	/* debugfs */
	struct qxl_debugfs	debugfs[QXL_DEBUGFS_MAX_COMPONENTS];
	unsigned		debugfs_count;

	struct mutex	update_area_mutex;

	/* surface id allocation */
	struct idr	surf_id_idr;
	spinlock_t surf_id_idr_lock;
	int last_alloced_surf_id;

	struct mutex surf_evict_mutex;
	struct io_mapping *vram_mapping;
	struct io_mapping *surface_mapping;

	/* current release bos, indexed by the release-type enum */
	struct mutex	release_mutex;
	struct qxl_bo *current_release_bo[3];
	int current_release_bo_offset[3];

	/* deferred garbage collection of completed releases */
	struct workqueue_struct *gc_queue;
	struct work_struct gc_work;

};
325 | |||
326 | /* forward declaration for QXL_INFO_IO */ | ||
327 | void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...); | ||
328 | |||
329 | extern struct drm_ioctl_desc qxl_ioctls[]; | ||
330 | extern int qxl_max_ioctl; | ||
331 | |||
332 | int qxl_driver_load(struct drm_device *dev, unsigned long flags); | ||
333 | int qxl_driver_unload(struct drm_device *dev); | ||
334 | |||
335 | int qxl_modeset_init(struct qxl_device *qdev); | ||
336 | void qxl_modeset_fini(struct qxl_device *qdev); | ||
337 | |||
338 | int qxl_bo_init(struct qxl_device *qdev); | ||
339 | void qxl_bo_fini(struct qxl_device *qdev); | ||
340 | |||
341 | struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header, | ||
342 | int element_size, | ||
343 | int n_elements, | ||
344 | int prod_notify, | ||
345 | bool set_prod_notify, | ||
346 | wait_queue_head_t *push_event); | ||
347 | void qxl_ring_free(struct qxl_ring *ring); | ||
348 | |||
349 | static inline void * | ||
350 | qxl_fb_virtual_address(struct qxl_device *qdev, unsigned long physical) | ||
351 | { | ||
352 | QXL_INFO(qdev, "not implemented (%lu)\n", physical); | ||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static inline uint64_t | ||
357 | qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo, | ||
358 | unsigned long offset) | ||
359 | { | ||
360 | int slot_id = bo->type == QXL_GEM_DOMAIN_VRAM ? qdev->main_mem_slot : qdev->surfaces_mem_slot; | ||
361 | struct qxl_memslot *slot = &(qdev->mem_slots[slot_id]); | ||
362 | |||
363 | /* TODO - need to hold one of the locks to read tbo.offset */ | ||
364 | return slot->high_bits | (bo->tbo.offset + offset); | ||
365 | } | ||
366 | |||
367 | /* qxl_fb.c */ | ||
368 | #define QXLFB_CONN_LIMIT 1 | ||
369 | |||
370 | int qxl_fbdev_init(struct qxl_device *qdev); | ||
371 | void qxl_fbdev_fini(struct qxl_device *qdev); | ||
372 | int qxl_get_handle_for_primary_fb(struct qxl_device *qdev, | ||
373 | struct drm_file *file_priv, | ||
374 | uint32_t *handle); | ||
375 | |||
376 | /* qxl_display.c */ | ||
377 | int | ||
378 | qxl_framebuffer_init(struct drm_device *dev, | ||
379 | struct qxl_framebuffer *rfb, | ||
380 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
381 | struct drm_gem_object *obj); | ||
382 | void qxl_display_read_client_monitors_config(struct qxl_device *qdev); | ||
383 | void qxl_send_monitors_config(struct qxl_device *qdev); | ||
384 | |||
385 | /* used by qxl_debugfs only */ | ||
386 | void qxl_crtc_set_from_monitors_config(struct qxl_device *qdev); | ||
387 | void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count); | ||
388 | |||
389 | /* qxl_gem.c */ | ||
390 | int qxl_gem_init(struct qxl_device *qdev); | ||
391 | void qxl_gem_fini(struct qxl_device *qdev); | ||
392 | int qxl_gem_object_create(struct qxl_device *qdev, int size, | ||
393 | int alignment, int initial_domain, | ||
394 | bool discardable, bool kernel, | ||
395 | struct qxl_surface *surf, | ||
396 | struct drm_gem_object **obj); | ||
397 | int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | ||
398 | uint64_t *gpu_addr); | ||
399 | void qxl_gem_object_unpin(struct drm_gem_object *obj); | ||
400 | int qxl_gem_object_create_with_handle(struct qxl_device *qdev, | ||
401 | struct drm_file *file_priv, | ||
402 | u32 domain, | ||
403 | size_t size, | ||
404 | struct qxl_surface *surf, | ||
405 | struct qxl_bo **qobj, | ||
406 | uint32_t *handle); | ||
407 | int qxl_gem_object_init(struct drm_gem_object *obj); | ||
408 | void qxl_gem_object_free(struct drm_gem_object *gobj); | ||
409 | int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); | ||
410 | void qxl_gem_object_close(struct drm_gem_object *obj, | ||
411 | struct drm_file *file_priv); | ||
412 | void qxl_bo_force_delete(struct qxl_device *qdev); | ||
413 | int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); | ||
414 | |||
415 | /* qxl_dumb.c */ | ||
416 | int qxl_mode_dumb_create(struct drm_file *file_priv, | ||
417 | struct drm_device *dev, | ||
418 | struct drm_mode_create_dumb *args); | ||
419 | int qxl_mode_dumb_destroy(struct drm_file *file_priv, | ||
420 | struct drm_device *dev, | ||
421 | uint32_t handle); | ||
422 | int qxl_mode_dumb_mmap(struct drm_file *filp, | ||
423 | struct drm_device *dev, | ||
424 | uint32_t handle, uint64_t *offset_p); | ||
425 | |||
426 | |||
427 | /* qxl ttm */ | ||
428 | int qxl_ttm_init(struct qxl_device *qdev); | ||
429 | void qxl_ttm_fini(struct qxl_device *qdev); | ||
430 | int qxl_mmap(struct file *filp, struct vm_area_struct *vma); | ||
431 | |||
432 | /* qxl image */ | ||
433 | |||
434 | int qxl_image_create(struct qxl_device *qdev, | ||
435 | struct qxl_release *release, | ||
436 | struct qxl_bo **image_bo, | ||
437 | const uint8_t *data, | ||
438 | int x, int y, int width, int height, | ||
439 | int depth, int stride); | ||
440 | void qxl_update_screen(struct qxl_device *qxl); | ||
441 | |||
442 | /* qxl io operations (qxl_cmd.c) */ | ||
443 | |||
444 | void qxl_io_create_primary(struct qxl_device *qdev, | ||
445 | unsigned width, unsigned height, unsigned offset, | ||
446 | struct qxl_bo *bo); | ||
447 | void qxl_io_destroy_primary(struct qxl_device *qdev); | ||
448 | void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id); | ||
449 | void qxl_io_notify_oom(struct qxl_device *qdev); | ||
450 | |||
451 | int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf, | ||
452 | const struct qxl_rect *area); | ||
453 | |||
454 | void qxl_io_reset(struct qxl_device *qdev); | ||
455 | void qxl_io_monitors_config(struct qxl_device *qdev); | ||
456 | int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible); | ||
457 | void qxl_io_flush_release(struct qxl_device *qdev); | ||
458 | void qxl_io_flush_surfaces(struct qxl_device *qdev); | ||
459 | |||
460 | int qxl_release_reserve(struct qxl_device *qdev, | ||
461 | struct qxl_release *release, bool no_wait); | ||
462 | void qxl_release_unreserve(struct qxl_device *qdev, | ||
463 | struct qxl_release *release); | ||
464 | union qxl_release_info *qxl_release_map(struct qxl_device *qdev, | ||
465 | struct qxl_release *release); | ||
466 | void qxl_release_unmap(struct qxl_device *qdev, | ||
467 | struct qxl_release *release, | ||
468 | union qxl_release_info *info); | ||
469 | /* | ||
470 | * qxl_bo_add_resource. | ||
471 | * | ||
472 | */ | ||
473 | void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource); | ||
474 | |||
475 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | ||
476 | enum qxl_surface_cmd_type surface_cmd_type, | ||
477 | struct qxl_release *create_rel, | ||
478 | struct qxl_release **release); | ||
479 | int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | ||
480 | int type, struct qxl_release **release, | ||
481 | struct qxl_bo **rbo); | ||
482 | int qxl_fence_releaseable(struct qxl_device *qdev, | ||
483 | struct qxl_release *release); | ||
484 | int | ||
485 | qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, | ||
486 | uint32_t type, bool interruptible); | ||
487 | int | ||
488 | qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, | ||
489 | uint32_t type, bool interruptible); | ||
490 | int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, | ||
491 | struct qxl_bo **_bo); | ||
492 | /* qxl drawing commands */ | ||
493 | |||
494 | void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, | ||
495 | int stride /* filled in if 0 */); | ||
496 | |||
497 | void qxl_draw_dirty_fb(struct qxl_device *qdev, | ||
498 | struct qxl_framebuffer *qxl_fb, | ||
499 | struct qxl_bo *bo, | ||
500 | unsigned flags, unsigned color, | ||
501 | struct drm_clip_rect *clips, | ||
502 | unsigned num_clips, int inc); | ||
503 | |||
504 | void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec); | ||
505 | |||
506 | void qxl_draw_copyarea(struct qxl_device *qdev, | ||
507 | u32 width, u32 height, | ||
508 | u32 sx, u32 sy, | ||
509 | u32 dx, u32 dy); | ||
510 | |||
511 | uint64_t | ||
512 | qxl_release_alloc(struct qxl_device *qdev, int type, | ||
513 | struct qxl_release **ret); | ||
514 | |||
515 | void qxl_release_free(struct qxl_device *qdev, | ||
516 | struct qxl_release *release); | ||
517 | void qxl_release_add_res(struct qxl_device *qdev, | ||
518 | struct qxl_release *release, | ||
519 | struct qxl_bo *bo); | ||
520 | /* used by qxl_debugfs_release */ | ||
521 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | ||
522 | uint64_t id); | ||
523 | |||
524 | bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush); | ||
525 | int qxl_garbage_collect(struct qxl_device *qdev); | ||
526 | |||
527 | /* debugfs */ | ||
528 | |||
529 | int qxl_debugfs_init(struct drm_minor *minor); | ||
530 | void qxl_debugfs_takedown(struct drm_minor *minor); | ||
531 | |||
532 | /* qxl_irq.c */ | ||
533 | int qxl_irq_init(struct qxl_device *qdev); | ||
534 | irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS); | ||
535 | |||
536 | /* qxl_fb.c */ | ||
537 | int qxl_fb_init(struct qxl_device *qdev); | ||
538 | |||
539 | int qxl_debugfs_add_files(struct qxl_device *qdev, | ||
540 | struct drm_info_list *files, | ||
541 | unsigned nfiles); | ||
542 | |||
543 | int qxl_surface_id_alloc(struct qxl_device *qdev, | ||
544 | struct qxl_bo *surf); | ||
545 | void qxl_surface_id_dealloc(struct qxl_device *qdev, | ||
546 | uint32_t surface_id); | ||
547 | int qxl_hw_surface_alloc(struct qxl_device *qdev, | ||
548 | struct qxl_bo *surf, | ||
549 | struct ttm_mem_reg *mem); | ||
550 | int qxl_hw_surface_dealloc(struct qxl_device *qdev, | ||
551 | struct qxl_bo *surf); | ||
552 | |||
553 | int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo); | ||
554 | |||
555 | struct qxl_drv_surface * | ||
556 | qxl_surface_lookup(struct drm_device *dev, int surface_id); | ||
557 | void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing); | ||
558 | int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); | ||
559 | |||
560 | /* qxl_fence.c */ | ||
561 | int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id); | ||
562 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); | ||
563 | int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); | ||
564 | void qxl_fence_fini(struct qxl_fence *qfence); | ||
565 | |||
566 | #endif | ||
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c new file mode 100644 index 000000000000..847c4ee798f7 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_dumb.c | |||
@@ -0,0 +1,93 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | #include "qxl_drv.h" | ||
27 | #include "qxl_object.h" | ||
28 | |||
29 | /* dumb ioctls implementation */ | ||
30 | |||
31 | int qxl_mode_dumb_create(struct drm_file *file_priv, | ||
32 | struct drm_device *dev, | ||
33 | struct drm_mode_create_dumb *args) | ||
34 | { | ||
35 | struct qxl_device *qdev = dev->dev_private; | ||
36 | struct qxl_bo *qobj; | ||
37 | uint32_t handle; | ||
38 | int r; | ||
39 | struct qxl_surface surf; | ||
40 | uint32_t pitch, format; | ||
41 | pitch = args->width * ((args->bpp + 1) / 8); | ||
42 | args->size = pitch * args->height; | ||
43 | args->size = ALIGN(args->size, PAGE_SIZE); | ||
44 | |||
45 | switch (args->bpp) { | ||
46 | case 16: | ||
47 | format = SPICE_SURFACE_FMT_16_565; | ||
48 | break; | ||
49 | case 32: | ||
50 | format = SPICE_SURFACE_FMT_32_xRGB; | ||
51 | break; | ||
52 | default: | ||
53 | return -EINVAL; | ||
54 | } | ||
55 | |||
56 | surf.width = args->width; | ||
57 | surf.height = args->height; | ||
58 | surf.stride = pitch; | ||
59 | surf.format = format; | ||
60 | r = qxl_gem_object_create_with_handle(qdev, file_priv, | ||
61 | QXL_GEM_DOMAIN_VRAM, | ||
62 | args->size, &surf, &qobj, | ||
63 | &handle); | ||
64 | if (r) | ||
65 | return r; | ||
66 | args->pitch = pitch; | ||
67 | args->handle = handle; | ||
68 | return 0; | ||
69 | } | ||
70 | |||
/*
 * DRM dumb-buffer destroy hook: dropping the handle releases the GEM
 * reference taken at create time; the bo is freed when the last
 * reference goes away.
 */
int qxl_mode_dumb_destroy(struct drm_file *file_priv,
			  struct drm_device *dev,
			  uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}
77 | |||
78 | int qxl_mode_dumb_mmap(struct drm_file *file_priv, | ||
79 | struct drm_device *dev, | ||
80 | uint32_t handle, uint64_t *offset_p) | ||
81 | { | ||
82 | struct drm_gem_object *gobj; | ||
83 | struct qxl_bo *qobj; | ||
84 | |||
85 | BUG_ON(!offset_p); | ||
86 | gobj = drm_gem_object_lookup(dev, file_priv, handle); | ||
87 | if (gobj == NULL) | ||
88 | return -ENOENT; | ||
89 | qobj = gem_to_qxl_bo(gobj); | ||
90 | *offset_p = qxl_bo_mmap_offset(qobj); | ||
91 | drm_gem_object_unreference_unlocked(gobj); | ||
92 | return 0; | ||
93 | } | ||
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c new file mode 100644 index 000000000000..232b52b50194 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_fb.c | |||
@@ -0,0 +1,567 @@ | |||
1 | /* | ||
2 | * Copyright © 2013 Red Hat | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Authors: | ||
24 | * David Airlie | ||
25 | */ | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/fb.h> | ||
28 | |||
29 | #include "drmP.h" | ||
30 | #include "drm/drm.h" | ||
31 | #include "drm/drm_crtc.h" | ||
32 | #include "drm/drm_crtc_helper.h" | ||
33 | #include "qxl_drv.h" | ||
34 | |||
35 | #include "qxl_object.h" | ||
36 | #include "drm_fb_helper.h" | ||
37 | |||
38 | #define QXL_DIRTY_DELAY (HZ / 30) | ||
39 | |||
/* Per-device fbdev emulation state, wrapping the generic DRM fb helper. */
struct qxl_fbdev {
	struct drm_fb_helper helper;
	struct qxl_framebuffer qfb;
	struct list_head fbdev_list;
	struct qxl_device *qdev;

	void *shadow;	/* vmalloc'ed CPU draw buffer; dirty regions are
			 * flushed to the device by qxl_fb_dirty_flush() */
	int size;

	/* dirty memory logging */
	struct {
		spinlock_t lock;
		bool active;
		unsigned x1;	/* dirty rectangle bounds */
		unsigned y1;
		unsigned x2;
		unsigned y2;
	} dirty;
};
59 | |||
60 | static void qxl_fb_image_init(struct qxl_fb_image *qxl_fb_image, | ||
61 | struct qxl_device *qdev, struct fb_info *info, | ||
62 | const struct fb_image *image) | ||
63 | { | ||
64 | qxl_fb_image->qdev = qdev; | ||
65 | if (info) { | ||
66 | qxl_fb_image->visual = info->fix.visual; | ||
67 | if (qxl_fb_image->visual == FB_VISUAL_TRUECOLOR || | ||
68 | qxl_fb_image->visual == FB_VISUAL_DIRECTCOLOR) | ||
69 | memcpy(&qxl_fb_image->pseudo_palette, | ||
70 | info->pseudo_palette, | ||
71 | sizeof(qxl_fb_image->pseudo_palette)); | ||
72 | } else { | ||
73 | /* fallback */ | ||
74 | if (image->depth == 1) | ||
75 | qxl_fb_image->visual = FB_VISUAL_MONO10; | ||
76 | else | ||
77 | qxl_fb_image->visual = FB_VISUAL_DIRECTCOLOR; | ||
78 | } | ||
79 | if (image) { | ||
80 | memcpy(&qxl_fb_image->fb_image, image, | ||
81 | sizeof(qxl_fb_image->fb_image)); | ||
82 | } | ||
83 | } | ||
84 | |||
/*
 * Flush the accumulated dirty rectangle of the fbdev shadow buffer to the
 * device as a single opaque-fb draw command, then reset the dirty region.
 * Runs from the deferred-io worker (qxl_deferred_io).
 */
static void qxl_fb_dirty_flush(struct fb_info *info)
{
	struct qxl_fbdev *qfbdev = info->par;
	struct qxl_device *qdev = qfbdev->qdev;
	struct qxl_fb_image qxl_fb_image;
	struct fb_image *image = &qxl_fb_image.fb_image;
	u32 x1, x2, y1, y2;

	/* TODO: hard coding 32 bpp */
	/* NOTE(review): DRM pitches[] is normally already in bytes, so the
	 * extra *4 looks suspicious — confirm against qxl_framebuffer_init. */
	int stride = qfbdev->qfb.base.pitches[0] * 4;

	x1 = qfbdev->dirty.x1;
	x2 = qfbdev->dirty.x2;
	y1 = qfbdev->dirty.y1;
	y2 = qfbdev->dirty.y2;
	/*
	 * we are using a shadow draw buffer, at qdev->surface0_shadow
	 */
	qxl_io_log(qdev, "dirty x[%d, %d], y[%d, %d]", x1, x2, y1, y2);
	image->dx = x1;
	image->dy = y1;
	image->width = x2 - x1;
	image->height = y2 - y1;
	image->fg_color = 0xffffffff; /* unused, just to avoid uninitialized
					 warnings */
	image->bg_color = 0;
	image->depth = 32; /* TODO: take from somewhere? */
	image->cmap.start = 0;
	image->cmap.len = 0;
	image->cmap.red = NULL;
	image->cmap.green = NULL;
	image->cmap.blue = NULL;
	image->cmap.transp = NULL;
	/* first dirty pixel in the shadow buffer (4 bytes per pixel) */
	image->data = qfbdev->shadow + (x1 * 4) + (stride * y1);

	qxl_fb_image_init(&qxl_fb_image, qdev, info, NULL);
	qxl_draw_opaque_fb(&qxl_fb_image, stride);
	/* reset the dirty region for the next accumulation cycle */
	qfbdev->dirty.x1 = 0;
	qfbdev->dirty.x2 = 0;
	qfbdev->dirty.y1 = 0;
	qfbdev->dirty.y2 = 0;
}
127 | |||
128 | static void qxl_deferred_io(struct fb_info *info, | ||
129 | struct list_head *pagelist) | ||
130 | { | ||
131 | struct qxl_fbdev *qfbdev = info->par; | ||
132 | unsigned long start, end, min, max; | ||
133 | struct page *page; | ||
134 | int y1, y2; | ||
135 | |||
136 | min = ULONG_MAX; | ||
137 | max = 0; | ||
138 | list_for_each_entry(page, pagelist, lru) { | ||
139 | start = page->index << PAGE_SHIFT; | ||
140 | end = start + PAGE_SIZE - 1; | ||
141 | min = min(min, start); | ||
142 | max = max(max, end); | ||
143 | } | ||
144 | |||
145 | if (min < max) { | ||
146 | y1 = min / info->fix.line_length; | ||
147 | y2 = (max / info->fix.line_length) + 1; | ||
148 | |||
149 | /* TODO: add spin lock? */ | ||
150 | /* spin_lock_irqsave(&qfbdev->dirty.lock, flags); */ | ||
151 | qfbdev->dirty.x1 = 0; | ||
152 | qfbdev->dirty.y1 = y1; | ||
153 | qfbdev->dirty.x2 = info->var.xres; | ||
154 | qfbdev->dirty.y2 = y2; | ||
155 | /* spin_unlock_irqrestore(&qfbdev->dirty.lock, flags); */ | ||
156 | } | ||
157 | |||
158 | qxl_fb_dirty_flush(info); | ||
159 | }; | ||
160 | |||
161 | |||
/* Deferred-io descriptor: mmap writes to the shadow buffer are batched and
 * flushed by qxl_deferred_io at most once per QXL_DIRTY_DELAY (~30 Hz).
 * NOTE(review): not static — confirm no other file references the symbol. */
struct fb_deferred_io qxl_defio = {
	.delay		= QXL_DIRTY_DELAY,
	.deferred_io	= qxl_deferred_io,
};
166 | |||
167 | static void qxl_fb_fillrect(struct fb_info *info, | ||
168 | const struct fb_fillrect *fb_rect) | ||
169 | { | ||
170 | struct qxl_fbdev *qfbdev = info->par; | ||
171 | struct qxl_device *qdev = qfbdev->qdev; | ||
172 | struct qxl_rect rect; | ||
173 | uint32_t color; | ||
174 | int x = fb_rect->dx; | ||
175 | int y = fb_rect->dy; | ||
176 | int width = fb_rect->width; | ||
177 | int height = fb_rect->height; | ||
178 | uint16_t rop; | ||
179 | struct qxl_draw_fill qxl_draw_fill_rec; | ||
180 | |||
181 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | ||
182 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) | ||
183 | color = ((u32 *) (info->pseudo_palette))[fb_rect->color]; | ||
184 | else | ||
185 | color = fb_rect->color; | ||
186 | rect.left = x; | ||
187 | rect.right = x + width; | ||
188 | rect.top = y; | ||
189 | rect.bottom = y + height; | ||
190 | switch (fb_rect->rop) { | ||
191 | case ROP_XOR: | ||
192 | rop = SPICE_ROPD_OP_XOR; | ||
193 | break; | ||
194 | case ROP_COPY: | ||
195 | rop = SPICE_ROPD_OP_PUT; | ||
196 | break; | ||
197 | default: | ||
198 | pr_err("qxl_fb_fillrect(): unknown rop, " | ||
199 | "defaulting to SPICE_ROPD_OP_PUT\n"); | ||
200 | rop = SPICE_ROPD_OP_PUT; | ||
201 | } | ||
202 | qxl_draw_fill_rec.qdev = qdev; | ||
203 | qxl_draw_fill_rec.rect = rect; | ||
204 | qxl_draw_fill_rec.color = color; | ||
205 | qxl_draw_fill_rec.rop = rop; | ||
206 | if (!drm_can_sleep()) { | ||
207 | qxl_io_log(qdev, | ||
208 | "%s: TODO use RCU, mysterious locks with spin_lock\n", | ||
209 | __func__); | ||
210 | return; | ||
211 | } | ||
212 | qxl_draw_fill(&qxl_draw_fill_rec); | ||
213 | } | ||
214 | |||
215 | static void qxl_fb_copyarea(struct fb_info *info, | ||
216 | const struct fb_copyarea *region) | ||
217 | { | ||
218 | struct qxl_fbdev *qfbdev = info->par; | ||
219 | |||
220 | qxl_draw_copyarea(qfbdev->qdev, | ||
221 | region->width, region->height, | ||
222 | region->sx, region->sy, | ||
223 | region->dx, region->dy); | ||
224 | } | ||
225 | |||
/* Thin wrapper: draw the image with stride 0, which tells
 * qxl_draw_opaque_fb to derive the stride itself. */
static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
{
	qxl_draw_opaque_fb(qxl_fb_image, 0);
}
230 | |||
231 | static void qxl_fb_imageblit(struct fb_info *info, | ||
232 | const struct fb_image *image) | ||
233 | { | ||
234 | struct qxl_fbdev *qfbdev = info->par; | ||
235 | struct qxl_device *qdev = qfbdev->qdev; | ||
236 | struct qxl_fb_image qxl_fb_image; | ||
237 | |||
238 | if (!drm_can_sleep()) { | ||
239 | /* we cannot do any ttm_bo allocation since that will fail on | ||
240 | * ioremap_wc..__get_vm_area_node, so queue the work item | ||
241 | * instead This can happen from printk inside an interrupt | ||
242 | * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */ | ||
243 | qxl_io_log(qdev, | ||
244 | "%s: TODO use RCU, mysterious locks with spin_lock\n", | ||
245 | __func__); | ||
246 | return; | ||
247 | } | ||
248 | |||
249 | /* ensure proper order of rendering operations - TODO: must do this | ||
250 | * for everything. */ | ||
251 | qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); | ||
252 | qxl_fb_imageblit_safe(&qxl_fb_image); | ||
253 | } | ||
254 | |||
/* Placeholder hook: nothing to set up at this stage. */
int qxl_fb_init(struct qxl_device *qdev)
{
	return 0;
}
259 | |||
/* fbdev ops: fill/copy/blit are routed through the qxl drawing code,
 * everything else is delegated to the generic DRM fb helper. */
static struct fb_ops qxlfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par, /* TODO: copy vmwgfx */
	.fb_fillrect = qxl_fb_fillrect,
	.fb_copyarea = qxl_fb_copyarea,
	.fb_imageblit = qxl_fb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};
273 | |||
274 | static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj) | ||
275 | { | ||
276 | struct qxl_bo *qbo = gem_to_qxl_bo(gobj); | ||
277 | int ret; | ||
278 | |||
279 | ret = qxl_bo_reserve(qbo, false); | ||
280 | if (likely(ret == 0)) { | ||
281 | qxl_bo_kunmap(qbo); | ||
282 | qxl_bo_unpin(qbo); | ||
283 | qxl_bo_unreserve(qbo); | ||
284 | } | ||
285 | drm_gem_object_unreference_unlocked(gobj); | ||
286 | } | ||
287 | |||
288 | int qxl_get_handle_for_primary_fb(struct qxl_device *qdev, | ||
289 | struct drm_file *file_priv, | ||
290 | uint32_t *handle) | ||
291 | { | ||
292 | int r; | ||
293 | struct drm_gem_object *gobj = qdev->fbdev_qfb->obj; | ||
294 | |||
295 | BUG_ON(!gobj); | ||
296 | /* drm_get_handle_create adds a reference - good */ | ||
297 | r = drm_gem_handle_create(file_priv, gobj, handle); | ||
298 | if (r) | ||
299 | return r; | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev, | ||
304 | struct drm_mode_fb_cmd2 *mode_cmd, | ||
305 | struct drm_gem_object **gobj_p) | ||
306 | { | ||
307 | struct qxl_device *qdev = qfbdev->qdev; | ||
308 | struct drm_gem_object *gobj = NULL; | ||
309 | struct qxl_bo *qbo = NULL; | ||
310 | int ret; | ||
311 | int aligned_size, size; | ||
312 | int height = mode_cmd->height; | ||
313 | int bpp; | ||
314 | int depth; | ||
315 | |||
316 | drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth); | ||
317 | |||
318 | size = mode_cmd->pitches[0] * height; | ||
319 | aligned_size = ALIGN(size, PAGE_SIZE); | ||
320 | /* TODO: unallocate and reallocate surface0 for real. Hack to just | ||
321 | * have a large enough surface0 for 1024x768 Xorg 32bpp mode */ | ||
322 | ret = qxl_gem_object_create(qdev, aligned_size, 0, | ||
323 | QXL_GEM_DOMAIN_SURFACE, | ||
324 | false, /* is discardable */ | ||
325 | false, /* is kernel (false means device) */ | ||
326 | NULL, | ||
327 | &gobj); | ||
328 | if (ret) { | ||
329 | pr_err("failed to allocate framebuffer (%d)\n", | ||
330 | aligned_size); | ||
331 | return -ENOMEM; | ||
332 | } | ||
333 | qbo = gem_to_qxl_bo(gobj); | ||
334 | |||
335 | qbo->surf.width = mode_cmd->width; | ||
336 | qbo->surf.height = mode_cmd->height; | ||
337 | qbo->surf.stride = mode_cmd->pitches[0]; | ||
338 | qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB; | ||
339 | ret = qxl_bo_reserve(qbo, false); | ||
340 | if (unlikely(ret != 0)) | ||
341 | goto out_unref; | ||
342 | ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL); | ||
343 | if (ret) { | ||
344 | qxl_bo_unreserve(qbo); | ||
345 | goto out_unref; | ||
346 | } | ||
347 | ret = qxl_bo_kmap(qbo, NULL); | ||
348 | qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */ | ||
349 | if (ret) | ||
350 | goto out_unref; | ||
351 | |||
352 | *gobj_p = gobj; | ||
353 | return 0; | ||
354 | out_unref: | ||
355 | qxlfb_destroy_pinned_object(gobj); | ||
356 | *gobj_p = NULL; | ||
357 | return ret; | ||
358 | } | ||
359 | |||
360 | static int qxlfb_create(struct qxl_fbdev *qfbdev, | ||
361 | struct drm_fb_helper_surface_size *sizes) | ||
362 | { | ||
363 | struct qxl_device *qdev = qfbdev->qdev; | ||
364 | struct fb_info *info; | ||
365 | struct drm_framebuffer *fb = NULL; | ||
366 | struct drm_mode_fb_cmd2 mode_cmd; | ||
367 | struct drm_gem_object *gobj = NULL; | ||
368 | struct qxl_bo *qbo = NULL; | ||
369 | struct device *device = &qdev->pdev->dev; | ||
370 | int ret; | ||
371 | int size; | ||
372 | int bpp = sizes->surface_bpp; | ||
373 | int depth = sizes->surface_depth; | ||
374 | void *shadow; | ||
375 | |||
376 | mode_cmd.width = sizes->surface_width; | ||
377 | mode_cmd.height = sizes->surface_height; | ||
378 | |||
379 | mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64); | ||
380 | mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); | ||
381 | |||
382 | ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj); | ||
383 | qbo = gem_to_qxl_bo(gobj); | ||
384 | QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width, | ||
385 | mode_cmd.height, mode_cmd.pitches[0]); | ||
386 | |||
387 | shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height); | ||
388 | /* TODO: what's the usual response to memory allocation errors? */ | ||
389 | BUG_ON(!shadow); | ||
390 | QXL_INFO(qdev, | ||
391 | "surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n", | ||
392 | qxl_bo_gpu_offset(qbo), | ||
393 | qxl_bo_mmap_offset(qbo), | ||
394 | qbo->kptr, | ||
395 | shadow); | ||
396 | size = mode_cmd.pitches[0] * mode_cmd.height; | ||
397 | |||
398 | info = framebuffer_alloc(0, device); | ||
399 | if (info == NULL) { | ||
400 | ret = -ENOMEM; | ||
401 | goto out_unref; | ||
402 | } | ||
403 | |||
404 | info->par = qfbdev; | ||
405 | |||
406 | qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj); | ||
407 | |||
408 | fb = &qfbdev->qfb.base; | ||
409 | |||
410 | /* setup helper with fb data */ | ||
411 | qfbdev->helper.fb = fb; | ||
412 | qfbdev->helper.fbdev = info; | ||
413 | qfbdev->shadow = shadow; | ||
414 | strcpy(info->fix.id, "qxldrmfb"); | ||
415 | |||
416 | drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); | ||
417 | |||
418 | info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; | ||
419 | info->fbops = &qxlfb_ops; | ||
420 | |||
421 | /* | ||
422 | * TODO: using gobj->size in various places in this function. Not sure | ||
423 | * what the difference between the different sizes is. | ||
424 | */ | ||
425 | info->fix.smem_start = qdev->vram_base; /* TODO - correct? */ | ||
426 | info->fix.smem_len = gobj->size; | ||
427 | info->screen_base = qfbdev->shadow; | ||
428 | info->screen_size = gobj->size; | ||
429 | |||
430 | drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width, | ||
431 | sizes->fb_height); | ||
432 | |||
433 | /* setup aperture base/size for vesafb takeover */ | ||
434 | info->apertures = alloc_apertures(1); | ||
435 | if (!info->apertures) { | ||
436 | ret = -ENOMEM; | ||
437 | goto out_unref; | ||
438 | } | ||
439 | info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base; | ||
440 | info->apertures->ranges[0].size = qdev->vram_size; | ||
441 | |||
442 | info->fix.mmio_start = 0; | ||
443 | info->fix.mmio_len = 0; | ||
444 | |||
445 | if (info->screen_base == NULL) { | ||
446 | ret = -ENOSPC; | ||
447 | goto out_unref; | ||
448 | } | ||
449 | |||
450 | ret = fb_alloc_cmap(&info->cmap, 256, 0); | ||
451 | if (ret) { | ||
452 | ret = -ENOMEM; | ||
453 | goto out_unref; | ||
454 | } | ||
455 | |||
456 | info->fbdefio = &qxl_defio; | ||
457 | fb_deferred_io_init(info); | ||
458 | |||
459 | qdev->fbdev_info = info; | ||
460 | qdev->fbdev_qfb = &qfbdev->qfb; | ||
461 | DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size); | ||
462 | DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height); | ||
463 | return 0; | ||
464 | |||
465 | out_unref: | ||
466 | if (qbo) { | ||
467 | ret = qxl_bo_reserve(qbo, false); | ||
468 | if (likely(ret == 0)) { | ||
469 | qxl_bo_kunmap(qbo); | ||
470 | qxl_bo_unpin(qbo); | ||
471 | qxl_bo_unreserve(qbo); | ||
472 | } | ||
473 | } | ||
474 | if (fb && ret) { | ||
475 | drm_gem_object_unreference(gobj); | ||
476 | drm_framebuffer_cleanup(fb); | ||
477 | kfree(fb); | ||
478 | } | ||
479 | drm_gem_object_unreference(gobj); | ||
480 | return ret; | ||
481 | } | ||
482 | |||
483 | static int qxl_fb_find_or_create_single( | ||
484 | struct drm_fb_helper *helper, | ||
485 | struct drm_fb_helper_surface_size *sizes) | ||
486 | { | ||
487 | struct qxl_fbdev *qfbdev = (struct qxl_fbdev *)helper; | ||
488 | int new_fb = 0; | ||
489 | int ret; | ||
490 | |||
491 | if (!helper->fb) { | ||
492 | ret = qxlfb_create(qfbdev, sizes); | ||
493 | if (ret) | ||
494 | return ret; | ||
495 | new_fb = 1; | ||
496 | } | ||
497 | return new_fb; | ||
498 | } | ||
499 | |||
/*
 * Tear down the fbdev emulation: unregister and release the fb_info,
 * drop the pinned framebuffer object, finalize the helper state, and
 * free the shadow draw buffer.
 */
static int qxl_fbdev_destroy(struct drm_device *dev, struct qxl_fbdev *qfbdev)
{
	struct fb_info *info;
	struct qxl_framebuffer *qfb = &qfbdev->qfb;

	if (qfbdev->helper.fbdev) {
		info = qfbdev->helper.fbdev;

		unregister_framebuffer(info);
		framebuffer_release(info);
	}
	if (qfb->obj) {
		/* kunmap, unpin and drop the GEM reference */
		qxlfb_destroy_pinned_object(qfb->obj);
		qfb->obj = NULL;
	}
	drm_fb_helper_fini(&qfbdev->helper);
	vfree(qfbdev->shadow);	/* CPU-side shadow draw buffer */
	drm_framebuffer_cleanup(&qfb->base);

	return 0;
}
521 | |||
/* DRM fb helper callbacks; the gamma hooks are still TODO. */
static struct drm_fb_helper_funcs qxl_fb_helper_funcs = {
	/* TODO
	.gamma_set = qxl_crtc_fb_gamma_set,
	.gamma_get = qxl_crtc_fb_gamma_get,
	*/
	.fb_probe = qxl_fb_find_or_create_single,
};
529 | |||
530 | int qxl_fbdev_init(struct qxl_device *qdev) | ||
531 | { | ||
532 | struct qxl_fbdev *qfbdev; | ||
533 | int bpp_sel = 32; /* TODO: parameter from somewhere? */ | ||
534 | int ret; | ||
535 | |||
536 | qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL); | ||
537 | if (!qfbdev) | ||
538 | return -ENOMEM; | ||
539 | |||
540 | qfbdev->qdev = qdev; | ||
541 | qdev->mode_info.qfbdev = qfbdev; | ||
542 | qfbdev->helper.funcs = &qxl_fb_helper_funcs; | ||
543 | |||
544 | ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, | ||
545 | 1 /* num_crtc - QXL supports just 1 */, | ||
546 | QXLFB_CONN_LIMIT); | ||
547 | if (ret) { | ||
548 | kfree(qfbdev); | ||
549 | return ret; | ||
550 | } | ||
551 | |||
552 | drm_fb_helper_single_add_all_connectors(&qfbdev->helper); | ||
553 | drm_fb_helper_initial_config(&qfbdev->helper, bpp_sel); | ||
554 | return 0; | ||
555 | } | ||
556 | |||
557 | void qxl_fbdev_fini(struct qxl_device *qdev) | ||
558 | { | ||
559 | if (!qdev->mode_info.qfbdev) | ||
560 | return; | ||
561 | |||
562 | qxl_fbdev_destroy(qdev->ddev, qdev->mode_info.qfbdev); | ||
563 | kfree(qdev->mode_info.qfbdev); | ||
564 | qdev->mode_info.qfbdev = NULL; | ||
565 | } | ||
566 | |||
567 | |||
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c new file mode 100644 index 000000000000..63c6715ad385 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_fence.c | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | |||
27 | #include "qxl_drv.h" | ||
28 | |||
29 | /* QXL fencing- | ||
30 | |||
31 | When we submit operations to the GPU we pass a release reference to the GPU | ||
32 | with them, the release reference is then added to the release ring when | ||
33 | the GPU is finished with that particular operation and has removed it from | ||
34 | its tree. | ||
35 | |||
36 | So we have can have multiple outstanding non linear fences per object. | ||
37 | |||
38 | From a TTM POV we only care if the object has any outstanding releases on | ||
39 | it. | ||
40 | |||
41 | we wait until all outstanding releases are processeed. | ||
42 | |||
43 | sync object is just a list of release ids that represent that fence on | ||
44 | that buffer. | ||
45 | |||
46 | we just add new releases onto the sync object attached to the object. | ||
47 | |||
48 | This currently uses a radix tree to store the list of release ids. | ||
49 | |||
50 | For some reason every so often qxl hw fails to release, things go wrong. | ||
51 | */ | ||
52 | |||
53 | |||
54 | int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id) | ||
55 | { | ||
56 | struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); | ||
57 | |||
58 | spin_lock(&bo->tbo.bdev->fence_lock); | ||
59 | radix_tree_insert(&qfence->tree, rel_id, qfence); | ||
60 | qfence->num_active_releases++; | ||
61 | spin_unlock(&bo->tbo.bdev->fence_lock); | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) | ||
66 | { | ||
67 | void *ret; | ||
68 | int retval = 0; | ||
69 | struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); | ||
70 | |||
71 | spin_lock(&bo->tbo.bdev->fence_lock); | ||
72 | |||
73 | ret = radix_tree_delete(&qfence->tree, rel_id); | ||
74 | if (ret == qfence) | ||
75 | qfence->num_active_releases--; | ||
76 | else { | ||
77 | DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id); | ||
78 | retval = -ENOENT; | ||
79 | } | ||
80 | spin_unlock(&bo->tbo.bdev->fence_lock); | ||
81 | return retval; | ||
82 | } | ||
83 | |||
84 | |||
85 | int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence) | ||
86 | { | ||
87 | qfence->qdev = qdev; | ||
88 | qfence->num_active_releases = 0; | ||
89 | INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC); | ||
90 | return 0; | ||
91 | } | ||
92 | |||
/* Tear down an embedded qxl_fence. */
void qxl_fence_fini(struct qxl_fence *qfence)
{
	/* NOTE(review): add/remove above track releases in qfence->tree (a
	 * radix tree), yet this frees qfence->release_ids and never tears
	 * the tree down -- release_ids looks like a leftover from an earlier
	 * design; confirm against the qxl_fence definition in qxl_drv.h. */
	kfree(qfence->release_ids);
	qfence->num_active_releases = 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c new file mode 100644 index 000000000000..adc1ee2cf7fb --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_gem.c | |||
@@ -0,0 +1,178 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | #include "drmP.h" | ||
27 | #include "drm/drm.h" | ||
28 | #include "qxl_drv.h" | ||
29 | #include "qxl_object.h" | ||
30 | |||
/* GEM init hook: nothing to do for qxl. */
int qxl_gem_object_init(struct drm_gem_object *obj)
{
	/* we do nothings here */
	return 0;
}
36 | |||
/* GEM free hook: drop the bo reference backing this gem object. */
void qxl_gem_object_free(struct drm_gem_object *gobj)
{
	struct qxl_bo *qobj = gem_to_qxl_bo(gobj);

	if (qobj)
		qxl_bo_unref(&qobj);
}
44 | |||
45 | int qxl_gem_object_create(struct qxl_device *qdev, int size, | ||
46 | int alignment, int initial_domain, | ||
47 | bool discardable, bool kernel, | ||
48 | struct qxl_surface *surf, | ||
49 | struct drm_gem_object **obj) | ||
50 | { | ||
51 | struct qxl_bo *qbo; | ||
52 | int r; | ||
53 | |||
54 | *obj = NULL; | ||
55 | /* At least align on page size */ | ||
56 | if (alignment < PAGE_SIZE) | ||
57 | alignment = PAGE_SIZE; | ||
58 | r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo); | ||
59 | if (r) { | ||
60 | if (r != -ERESTARTSYS) | ||
61 | DRM_ERROR( | ||
62 | "Failed to allocate GEM object (%d, %d, %u, %d)\n", | ||
63 | size, initial_domain, alignment, r); | ||
64 | return r; | ||
65 | } | ||
66 | *obj = &qbo->gem_base; | ||
67 | |||
68 | mutex_lock(&qdev->gem.mutex); | ||
69 | list_add_tail(&qbo->list, &qdev->gem.objects); | ||
70 | mutex_unlock(&qdev->gem.mutex); | ||
71 | |||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | int qxl_gem_object_create_with_handle(struct qxl_device *qdev, | ||
76 | struct drm_file *file_priv, | ||
77 | u32 domain, | ||
78 | size_t size, | ||
79 | struct qxl_surface *surf, | ||
80 | struct qxl_bo **qobj, | ||
81 | uint32_t *handle) | ||
82 | { | ||
83 | struct drm_gem_object *gobj; | ||
84 | int r; | ||
85 | |||
86 | BUG_ON(!qobj); | ||
87 | BUG_ON(!handle); | ||
88 | |||
89 | r = qxl_gem_object_create(qdev, size, 0, | ||
90 | domain, | ||
91 | false, false, surf, | ||
92 | &gobj); | ||
93 | if (r) | ||
94 | return -ENOMEM; | ||
95 | r = drm_gem_handle_create(file_priv, gobj, handle); | ||
96 | if (r) | ||
97 | return r; | ||
98 | /* drop reference from allocate - handle holds it now */ | ||
99 | *qobj = gem_to_qxl_bo(gobj); | ||
100 | drm_gem_object_unreference_unlocked(gobj); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, | ||
105 | uint64_t *gpu_addr) | ||
106 | { | ||
107 | struct qxl_bo *qobj = obj->driver_private; | ||
108 | int r; | ||
109 | |||
110 | r = qxl_bo_reserve(qobj, false); | ||
111 | if (unlikely(r != 0)) | ||
112 | return r; | ||
113 | r = qxl_bo_pin(qobj, pin_domain, gpu_addr); | ||
114 | qxl_bo_unreserve(qobj); | ||
115 | return r; | ||
116 | } | ||
117 | |||
118 | void qxl_gem_object_unpin(struct drm_gem_object *obj) | ||
119 | { | ||
120 | struct qxl_bo *qobj = obj->driver_private; | ||
121 | int r; | ||
122 | |||
123 | r = qxl_bo_reserve(qobj, false); | ||
124 | if (likely(r == 0)) { | ||
125 | qxl_bo_unpin(qobj); | ||
126 | qxl_bo_unreserve(qobj); | ||
127 | } | ||
128 | } | ||
129 | |||
130 | int qxl_gem_set_domain(struct drm_gem_object *gobj, | ||
131 | uint32_t rdomain, uint32_t wdomain) | ||
132 | { | ||
133 | struct qxl_bo *qobj; | ||
134 | uint32_t domain; | ||
135 | int r; | ||
136 | |||
137 | /* FIXME: reeimplement */ | ||
138 | qobj = gobj->driver_private; | ||
139 | /* work out where to validate the buffer to */ | ||
140 | domain = wdomain; | ||
141 | if (!domain) | ||
142 | domain = rdomain; | ||
143 | if (!domain) { | ||
144 | /* Do nothings */ | ||
145 | pr_warn("Set domain withou domain !\n"); | ||
146 | return 0; | ||
147 | } | ||
148 | if (domain == QXL_GEM_DOMAIN_CPU) { | ||
149 | /* Asking for cpu access wait for object idle */ | ||
150 | r = qxl_bo_wait(qobj, NULL, false); | ||
151 | if (r) { | ||
152 | pr_err("Failed to wait for object !\n"); | ||
153 | return r; | ||
154 | } | ||
155 | } | ||
156 | return 0; | ||
157 | } | ||
158 | |||
/* GEM per-open hook: no per-file work needed for qxl. */
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	return 0;
}
163 | |||
/* GEM per-close hook: nothing to tear down for qxl. */
void qxl_gem_object_close(struct drm_gem_object *obj,
			  struct drm_file *file_priv)
{
}
168 | |||
169 | int qxl_gem_init(struct qxl_device *qdev) | ||
170 | { | ||
171 | INIT_LIST_HEAD(&qdev->gem.objects); | ||
172 | return 0; | ||
173 | } | ||
174 | |||
/* Device teardown: presumably reclaims any objects still on
 * qdev->gem.objects -- see qxl_bo_force_delete() for the details. */
void qxl_gem_fini(struct qxl_device *qdev)
{
	qxl_bo_force_delete(qdev);
}
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c new file mode 100644 index 000000000000..cf856206996b --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_image.c | |||
@@ -0,0 +1,176 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | #include <linux/gfp.h> | ||
27 | #include <linux/slab.h> | ||
28 | |||
29 | #include "qxl_drv.h" | ||
30 | #include "qxl_object.h" | ||
31 | |||
/*
 * Build a SPICE bitmap image in device memory: one data chunk bo holding
 * the pixel data, plus a qxl_image bo describing it.  The chunk bo is
 * added to @release and unreferenced before returning; the image bo is
 * handed back reserved through @image_bo.
 *
 * NOTE(review): @hash is accepted but never used.
 */
static int
qxl_image_create_helper(struct qxl_device *qdev,
			struct qxl_release *release,
			struct qxl_bo **image_bo,
			const uint8_t *data,
			int width, int height,
			int depth, unsigned int hash,
			int stride)
{
	struct qxl_image *image;
	struct qxl_data_chunk *chunk;
	int i;
	int chunk_stride;
	int linesize = width * depth / 8;	/* bytes of real pixels per row */
	struct qxl_bo *chunk_bo;
	int ret;
	void *ptr;
	/* Chunk */
	/* FIXME: Check integer overflow */
	/* TODO: variable number of chunks */
	chunk_stride = stride; /* TODO: should use linesize, but it renders
				  wrong (check the bitmaps are sent correctly
				  first) */
	/* NOTE(review): ret is not checked here; a failed allocation would
	 * be dereferenced below. */
	ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
				    &chunk_bo);

	/* write the chunk header (single chunk: no prev/next links) */
	ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
	chunk = ptr;
	chunk->data_size = height * chunk_stride;
	chunk->prev_chunk = 0;
	chunk->next_chunk = 0;
	qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);

	{
		void *k_data, *i_data;
		int remain;
		int page;
		int size;
		if (stride == linesize && chunk_stride == stride) {
			/* fast path: rows are contiguous, copy the whole
			 * bitmap page by page into the chunk */
			remain = linesize * height;
			page = 0;
			i_data = (void *)data;

			while (remain > 0) {
				ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);

				if (page == 0) {
					/* first page also holds the chunk
					 * header; copy after it */
					chunk = ptr;
					k_data = chunk->data;
					size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
				} else {
					k_data = ptr;
					size = PAGE_SIZE;
				}
				size = min(size, remain);

				memcpy(k_data, i_data, size);

				qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
				i_data += size;
				remain -= size;
				page++;
			}
		} else {
			/* slow path: copy row by row, splitting each row at
			 * page boundaries inside the chunk bo */
			unsigned page_base, page_offset, out_offset;
			for (i = 0 ; i < height ; ++i) {
				i_data = (void *)data + i * stride;
				remain = linesize;
				out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;

				while (remain > 0) {
					page_base = out_offset & PAGE_MASK;
					page_offset = offset_in_page(out_offset);

					size = min((int)(PAGE_SIZE - page_offset), remain);

					ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
					k_data = ptr + page_offset;
					memcpy(k_data, i_data, size);
					qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
					remain -= size;
					i_data += size;
					out_offset += size;
				}
			}
		}
	}


	/* NOTE(review): chunk_bo was only ever mapped via
	 * qxl_bo_kmap_atomic_page(), each of which was already unmapped --
	 * this kunmap has no matching kmap; verify against qxl_object.c. */
	qxl_bo_kunmap(chunk_bo);

	/* Image */
	/* NOTE(review): ret unchecked again, same hazard as above. */
	ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);

	ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
	image = ptr;

	image->descriptor.id = 0;
	image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;

	image->descriptor.flags = 0;
	image->descriptor.width = width;
	image->descriptor.height = height;

	switch (depth) {
	case 1:
		/* TODO: BE? check by arch? */
		image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
		break;
	case 24:
		image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
		break;
	case 32:
		image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
		break;
	default:
		DRM_ERROR("unsupported image bit depth\n");
		/* NOTE(review): this leaks chunk_bo and the atomic mapping
		 * of *image_bo -- the TODO below is real. */
		return -EINVAL; /* TODO: cleanup */
	}
	image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
	image->u.bitmap.x = width;
	image->u.bitmap.y = height;
	image->u.bitmap.stride = chunk_stride;
	image->u.bitmap.palette = 0;
	/* point the image at the chunk and transfer chunk ownership to the
	 * release */
	image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
	qxl_release_add_res(qdev, release, chunk_bo);
	qxl_bo_unreserve(chunk_bo);
	qxl_bo_unref(&chunk_bo);

	qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);

	return 0;
}
165 | |||
166 | int qxl_image_create(struct qxl_device *qdev, | ||
167 | struct qxl_release *release, | ||
168 | struct qxl_bo **image_bo, | ||
169 | const uint8_t *data, | ||
170 | int x, int y, int width, int height, | ||
171 | int depth, int stride) | ||
172 | { | ||
173 | data += y * stride + x * (depth / 8); | ||
174 | return qxl_image_create_helper(qdev, release, image_bo, data, | ||
175 | width, height, depth, 0, stride); | ||
176 | } | ||
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c new file mode 100644 index 000000000000..83ca4f713f88 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c | |||
@@ -0,0 +1,411 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | #include "qxl_drv.h" | ||
27 | #include "qxl_object.h" | ||
28 | |||
29 | /* | ||
30 | * TODO: allocating a new gem(in qxl_bo) for each request. | ||
31 | * This is wasteful since bo's are page aligned. | ||
32 | */ | ||
33 | int qxl_alloc_ioctl(struct drm_device *dev, void *data, | ||
34 | struct drm_file *file_priv) | ||
35 | { | ||
36 | struct qxl_device *qdev = dev->dev_private; | ||
37 | struct drm_qxl_alloc *qxl_alloc = data; | ||
38 | int ret; | ||
39 | struct qxl_bo *qobj; | ||
40 | uint32_t handle; | ||
41 | u32 domain = QXL_GEM_DOMAIN_VRAM; | ||
42 | |||
43 | if (qxl_alloc->size == 0) { | ||
44 | DRM_ERROR("invalid size %d\n", qxl_alloc->size); | ||
45 | return -EINVAL; | ||
46 | } | ||
47 | ret = qxl_gem_object_create_with_handle(qdev, file_priv, | ||
48 | domain, | ||
49 | qxl_alloc->size, | ||
50 | NULL, | ||
51 | &qobj, &handle); | ||
52 | if (ret) { | ||
53 | DRM_ERROR("%s: failed to create gem ret=%d\n", | ||
54 | __func__, ret); | ||
55 | return -ENOMEM; | ||
56 | } | ||
57 | qxl_alloc->handle = handle; | ||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | int qxl_map_ioctl(struct drm_device *dev, void *data, | ||
62 | struct drm_file *file_priv) | ||
63 | { | ||
64 | struct qxl_device *qdev = dev->dev_private; | ||
65 | struct drm_qxl_map *qxl_map = data; | ||
66 | |||
67 | return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle, | ||
68 | &qxl_map->offset); | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's | ||
73 | * are on vram). | ||
74 | * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) | ||
75 | */ | ||
76 | static void | ||
77 | apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, | ||
78 | struct qxl_bo *src, uint64_t src_off) | ||
79 | { | ||
80 | void *reloc_page; | ||
81 | |||
82 | reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); | ||
83 | *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, | ||
84 | src, src_off); | ||
85 | qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); | ||
86 | } | ||
87 | |||
88 | static void | ||
89 | apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, | ||
90 | struct qxl_bo *src) | ||
91 | { | ||
92 | uint32_t id = 0; | ||
93 | void *reloc_page; | ||
94 | |||
95 | if (src && !src->is_primary) | ||
96 | id = src->surface_id; | ||
97 | |||
98 | reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); | ||
99 | *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; | ||
100 | qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); | ||
101 | } | ||
102 | |||
103 | /* return holding the reference to this object */ | ||
104 | struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, | ||
105 | struct drm_file *file_priv, uint64_t handle, | ||
106 | struct qxl_reloc_list *reloc_list) | ||
107 | { | ||
108 | struct drm_gem_object *gobj; | ||
109 | struct qxl_bo *qobj; | ||
110 | int ret; | ||
111 | |||
112 | gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); | ||
113 | if (!gobj) { | ||
114 | DRM_ERROR("bad bo handle %lld\n", handle); | ||
115 | return NULL; | ||
116 | } | ||
117 | qobj = gem_to_qxl_bo(gobj); | ||
118 | |||
119 | ret = qxl_bo_list_add(reloc_list, qobj); | ||
120 | if (ret) | ||
121 | return NULL; | ||
122 | |||
123 | return qobj; | ||
124 | } | ||
125 | |||
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full QXLDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial QXLReleaseInfo struct (first XXX bytes)
 */
int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_execbuffer *execbuffer = data;
	struct drm_qxl_command user_cmd;
	int cmd_num;
	struct qxl_bo *reloc_src_bo;
	struct qxl_bo *reloc_dst_bo;
	struct drm_qxl_reloc reloc;
	void *fb_cmd;
	int i, ret;
	struct qxl_reloc_list reloc_list;
	int unwritten;
	uint32_t reloc_dst_offset;
	INIT_LIST_HEAD(&reloc_list.bos);

	/* one release + command bo per submitted command */
	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
		struct qxl_release *release;
		struct qxl_bo *cmd_bo;
		int release_type;
		struct drm_qxl_command *commands =
			(struct drm_qxl_command *)execbuffer->commands;

		/* NOTE(review): the early returns in this loop (here, the
		 * switch default, and the size check) skip
		 * qxl_bo_list_unreserve() for bos reserved by earlier
		 * iterations -- verify cleanup on these paths. */
		if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
				       sizeof(user_cmd)))
			return -EFAULT;
		switch (user_cmd.type) {
		case QXL_CMD_DRAW:
			release_type = QXL_RELEASE_DRAWABLE;
			break;
		case QXL_CMD_SURFACE:
		case QXL_CMD_CURSOR:
		default:
			DRM_DEBUG("Only draw commands in execbuffers\n");
			return -EINVAL;
			break;
		}

		/* command must fit in one page after the release info */
		if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
			return -EINVAL;

		ret = qxl_alloc_release_reserved(qdev,
						 sizeof(union qxl_release_info) +
						 user_cmd.command_size,
						 release_type,
						 &release,
						 &cmd_bo);
		if (ret)
			return ret;

		/* copy the command body from userspace into the command bo,
		 * just past the release info header */
		/* TODO copy slow path code from i915 */
		/* NOTE(review): "& PAGE_SIZE" / "& ~PAGE_SIZE" below look
		 * like they were meant to be PAGE_MASK / ~PAGE_MASK; as
		 * written they test a single bit.  Confirm against
		 * release_offset's actual range. */
		fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
		unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
		qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
		if (unwritten) {
			DRM_ERROR("got unwritten %d\n", unwritten);
			qxl_release_unreserve(qdev, release);
			qxl_release_free(qdev, release);
			return -EFAULT;
		}

		/* apply each relocation into the command (or another bo) */
		for (i = 0 ; i < user_cmd.relocs_num; ++i) {
			if (DRM_COPY_FROM_USER(&reloc,
					       &((struct drm_qxl_reloc *)user_cmd.relocs)[i],
					       sizeof(reloc))) {
				qxl_bo_list_unreserve(&reloc_list, true);
				qxl_release_unreserve(qdev, release);
				qxl_release_free(qdev, release);
				return -EFAULT;
			}

			/* add the bos to the list of bos to validate -
			   need to validate first then process relocs? */
			if (reloc.dst_handle) {
				reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
								  reloc.dst_handle, &reloc_list);
				if (!reloc_dst_bo) {
					qxl_bo_list_unreserve(&reloc_list, true);
					qxl_release_unreserve(qdev, release);
					qxl_release_free(qdev, release);
					return -EINVAL;
				}
				reloc_dst_offset = 0;
			} else {
				/* no dst handle: patch the command bo itself */
				reloc_dst_bo = cmd_bo;
				reloc_dst_offset = release->release_offset;
			}

			/* reserve and validate the reloc dst bo */
			if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
				reloc_src_bo =
					qxlhw_handle_to_bo(qdev, file_priv,
							   reloc.src_handle, &reloc_list);
				if (!reloc_src_bo) {
					if (reloc_dst_bo != cmd_bo)
						drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
					qxl_bo_list_unreserve(&reloc_list, true);
					qxl_release_unreserve(qdev, release);
					qxl_release_free(qdev, release);
					return -EINVAL;
				}
			} else
				reloc_src_bo = NULL;
			if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
				apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
					    reloc_src_bo, reloc.src_offset);
			} else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
				apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
			} else {
				/* NOTE(review): this path returns without the
				 * unreserve/free cleanup the other error
				 * paths perform. */
				DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
				return -EINVAL;
			}

			/* drop the per-reloc lookup references; the release
			 * now tracks the src bo as a resource */
			if (reloc_src_bo && reloc_src_bo != cmd_bo) {
				qxl_release_add_res(qdev, release, reloc_src_bo);
				drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
			}

			if (reloc_dst_bo != cmd_bo)
				drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
		}
		/* make the release fenceable and push the command to hw */
		qxl_fence_releaseable(qdev, release);

		ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
		if (ret == -ERESTARTSYS) {
			qxl_release_unreserve(qdev, release);
			qxl_release_free(qdev, release);
			qxl_bo_list_unreserve(&reloc_list, true);
			return ret;
		}
		qxl_release_unreserve(qdev, release);
	}
	qxl_bo_list_unreserve(&reloc_list, 0);
	return 0;
}
268 | |||
269 | int qxl_update_area_ioctl(struct drm_device *dev, void *data, | ||
270 | struct drm_file *file) | ||
271 | { | ||
272 | struct qxl_device *qdev = dev->dev_private; | ||
273 | struct drm_qxl_update_area *update_area = data; | ||
274 | struct qxl_rect area = {.left = update_area->left, | ||
275 | .top = update_area->top, | ||
276 | .right = update_area->right, | ||
277 | .bottom = update_area->bottom}; | ||
278 | int ret; | ||
279 | struct drm_gem_object *gobj = NULL; | ||
280 | struct qxl_bo *qobj = NULL; | ||
281 | |||
282 | if (update_area->left >= update_area->right || | ||
283 | update_area->top >= update_area->bottom) | ||
284 | return -EINVAL; | ||
285 | |||
286 | gobj = drm_gem_object_lookup(dev, file, update_area->handle); | ||
287 | if (gobj == NULL) | ||
288 | return -ENOENT; | ||
289 | |||
290 | qobj = gem_to_qxl_bo(gobj); | ||
291 | |||
292 | ret = qxl_bo_reserve(qobj, false); | ||
293 | if (ret) | ||
294 | goto out; | ||
295 | |||
296 | if (!qobj->pin_count) { | ||
297 | ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, | ||
298 | true, false); | ||
299 | if (unlikely(ret)) | ||
300 | goto out; | ||
301 | } | ||
302 | |||
303 | ret = qxl_bo_check_id(qdev, qobj); | ||
304 | if (ret) | ||
305 | goto out2; | ||
306 | if (!qobj->surface_id) | ||
307 | DRM_ERROR("got update area for surface with no id %d\n", update_area->handle); | ||
308 | ret = qxl_io_update_area(qdev, qobj, &area); | ||
309 | |||
310 | out2: | ||
311 | qxl_bo_unreserve(qobj); | ||
312 | |||
313 | out: | ||
314 | drm_gem_object_unreference_unlocked(gobj); | ||
315 | return ret; | ||
316 | } | ||
317 | |||
318 | static int qxl_getparam_ioctl(struct drm_device *dev, void *data, | ||
319 | struct drm_file *file_priv) | ||
320 | { | ||
321 | struct qxl_device *qdev = dev->dev_private; | ||
322 | struct drm_qxl_getparam *param = data; | ||
323 | |||
324 | switch (param->param) { | ||
325 | case QXL_PARAM_NUM_SURFACES: | ||
326 | param->value = qdev->rom->n_surfaces; | ||
327 | break; | ||
328 | case QXL_PARAM_MAX_RELOCS: | ||
329 | param->value = QXL_MAX_RES; | ||
330 | break; | ||
331 | default: | ||
332 | return -EINVAL; | ||
333 | } | ||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | static int qxl_clientcap_ioctl(struct drm_device *dev, void *data, | ||
338 | struct drm_file *file_priv) | ||
339 | { | ||
340 | struct qxl_device *qdev = dev->dev_private; | ||
341 | struct drm_qxl_clientcap *param = data; | ||
342 | int byte, idx; | ||
343 | |||
344 | byte = param->index / 8; | ||
345 | idx = param->index % 8; | ||
346 | |||
347 | if (qdev->pdev->revision < 4) | ||
348 | return -ENOSYS; | ||
349 | |||
350 | if (byte > 58) | ||
351 | return -ENOSYS; | ||
352 | |||
353 | if (qdev->rom->client_capabilities[byte] & (1 << idx)) | ||
354 | return 0; | ||
355 | return -ENOSYS; | ||
356 | } | ||
357 | |||
/*
 * DRM_QXL_ALLOC_SURF: allocate a surface bo in the surface domain and
 * return a gem handle for it.
 */
static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct qxl_device *qdev = dev->dev_private;
	struct drm_qxl_alloc_surf *param = data;
	struct qxl_bo *qobj;
	int handle;
	int ret;
	int size, actual_stride;
	struct qxl_surface surf;

	/* work out size allocate bo with handle */
	/* a negative stride means a bottom-up layout; the size only depends
	 * on its magnitude */
	actual_stride = param->stride < 0 ? -param->stride : param->stride;
	/* NOTE(review): stride and height come from userspace and this int
	 * multiply is unchecked -- it can overflow before allocation;
	 * needs a range check. */
	size = actual_stride * param->height + actual_stride;

	surf.format = param->format;
	surf.width = param->width;
	surf.height = param->height;
	surf.stride = param->stride;
	surf.data = 0;

	ret = qxl_gem_object_create_with_handle(qdev, file,
						QXL_GEM_DOMAIN_SURFACE,
						size,
						&surf,
						&qobj, &handle);
	if (ret) {
		DRM_ERROR("%s: failed to create gem ret=%d\n",
			  __func__, ret);
		return -ENOMEM;
	} else
		param->handle = handle;
	return ret;
}
392 | |||
/* qxl ioctl table: every entry requires DRM auth and runs unlocked. */
struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
};

/* number of entries above, exported to the drm_driver struct */
int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c new file mode 100644 index 000000000000..21393dc4700a --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_irq.c | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | #include "qxl_drv.h" | ||
27 | |||
28 | irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS) | ||
29 | { | ||
30 | struct drm_device *dev = (struct drm_device *) arg; | ||
31 | struct qxl_device *qdev = (struct qxl_device *)dev->dev_private; | ||
32 | uint32_t pending; | ||
33 | |||
34 | pending = xchg(&qdev->ram_header->int_pending, 0); | ||
35 | |||
36 | atomic_inc(&qdev->irq_received); | ||
37 | |||
38 | if (pending & QXL_INTERRUPT_DISPLAY) { | ||
39 | atomic_inc(&qdev->irq_received_display); | ||
40 | wake_up_all(&qdev->display_event); | ||
41 | qxl_queue_garbage_collect(qdev, false); | ||
42 | } | ||
43 | if (pending & QXL_INTERRUPT_CURSOR) { | ||
44 | atomic_inc(&qdev->irq_received_cursor); | ||
45 | wake_up_all(&qdev->cursor_event); | ||
46 | } | ||
47 | if (pending & QXL_INTERRUPT_IO_CMD) { | ||
48 | atomic_inc(&qdev->irq_received_io_cmd); | ||
49 | wake_up_all(&qdev->io_cmd_event); | ||
50 | } | ||
51 | if (pending & QXL_INTERRUPT_ERROR) { | ||
52 | /* TODO: log it, reset device (only way to exit this condition) | ||
53 | * (do it a certain number of times, afterwards admit defeat, | ||
54 | * to avoid endless loops). | ||
55 | */ | ||
56 | qdev->irq_received_error++; | ||
57 | qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__); | ||
58 | } | ||
59 | if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) { | ||
60 | qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n"); | ||
61 | schedule_work(&qdev->client_monitors_config_work); | ||
62 | } | ||
63 | qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; | ||
64 | outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ); | ||
65 | return IRQ_HANDLED; | ||
66 | } | ||
67 | |||
68 | static void qxl_client_monitors_config_work_func(struct work_struct *work) | ||
69 | { | ||
70 | struct qxl_device *qdev = container_of(work, struct qxl_device, | ||
71 | client_monitors_config_work); | ||
72 | |||
73 | qxl_display_read_client_monitors_config(qdev); | ||
74 | } | ||
75 | |||
76 | int qxl_irq_init(struct qxl_device *qdev) | ||
77 | { | ||
78 | int ret; | ||
79 | |||
80 | init_waitqueue_head(&qdev->display_event); | ||
81 | init_waitqueue_head(&qdev->cursor_event); | ||
82 | init_waitqueue_head(&qdev->io_cmd_event); | ||
83 | INIT_WORK(&qdev->client_monitors_config_work, | ||
84 | qxl_client_monitors_config_work_func); | ||
85 | atomic_set(&qdev->irq_received, 0); | ||
86 | atomic_set(&qdev->irq_received_display, 0); | ||
87 | atomic_set(&qdev->irq_received_cursor, 0); | ||
88 | atomic_set(&qdev->irq_received_io_cmd, 0); | ||
89 | qdev->irq_received_error = 0; | ||
90 | ret = drm_irq_install(qdev->ddev); | ||
91 | qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; | ||
92 | if (unlikely(ret != 0)) { | ||
93 | DRM_ERROR("Failed installing irq: %d\n", ret); | ||
94 | return 1; | ||
95 | } | ||
96 | return 0; | ||
97 | } | ||
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c new file mode 100644 index 000000000000..036e0de13412 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_kms.c | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | #include "qxl_drv.h" | ||
27 | #include "qxl_object.h" | ||
28 | |||
29 | #include <linux/io-mapping.h> | ||
30 | |||
31 | int qxl_log_level; | ||
32 | |||
33 | static void qxl_dump_mode(struct qxl_device *qdev, void *p) | ||
34 | { | ||
35 | struct qxl_mode *m = p; | ||
36 | DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n", | ||
37 | m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili, | ||
38 | m->y_mili, m->orientation); | ||
39 | } | ||
40 | |||
/*
 * Sanity-check the ROM BAR contents and cache the mode list.
 *
 * Fills in qdev->vram_size and qdev->mode_info from the ROM and dumps
 * each advertised mode at KMS debug level.  Returns false when the ROM
 * signature does not match.
 */
static bool qxl_check_device(struct qxl_device *qdev)
{
	struct qxl_rom *rom = qdev->rom;
	int mode_offset;
	int i;

	/* 0x4f525851 is ASCII "QXRO" read as a little-endian u32 */
	if (rom->magic != 0x4f525851) {
		DRM_ERROR("bad rom signature %x\n", rom->magic);
		return false;
	}

	DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
	DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
		 rom->log_level);
	DRM_INFO("Currently using mode #%d, list at 0x%x\n",
		 rom->mode, rom->modes_offset);
	DRM_INFO("%d io pages at offset 0x%x\n",
		 rom->num_io_pages, rom->pages_offset);
	DRM_INFO("%d byte draw area at offset 0x%x\n",
		 rom->surface0_area_size, rom->draw_area_offset);

	qdev->vram_size = rom->surface0_area_size;
	DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);

	/* mode list layout in the ROM: a u32 count, then that many
	 * struct qxl_mode entries immediately after it */
	mode_offset = rom->modes_offset / 4;
	qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
	DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
		 qdev->mode_info.num_modes);
	/* modes points straight into the ioremapped ROM, not a copy */
	qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
	for (i = 0; i < qdev->mode_info.num_modes; i++)
		qxl_dump_mode(qdev, qdev->mode_info.modes + i);
	return true;
}
74 | |||
/*
 * Register the physical address range [start_phys_addr, end_phys_addr)
 * as a device memory slot and precompute the slot's address tag.
 *
 * Returns the absolute slot index used (rom->slots_start +
 * slot_index_offset).  slot->high_bits caches the slot id + generation
 * packed into the top (slot_id_bits + slot_gen_bits) bits of a 64-bit
 * address; presumably ORed into device addresses elsewhere — confirm
 * against qxl_drv.h helpers.
 */
static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
	unsigned long start_phys_addr, unsigned long end_phys_addr)
{
	uint64_t high_bits;
	struct qxl_memslot *slot;
	uint8_t slot_index;
	struct qxl_ram_header *ram_header = qdev->ram_header;

	slot_index = qdev->rom->slots_start + slot_index_offset;
	slot = &qdev->mem_slots[slot_index];
	slot->start_phys_addr = start_phys_addr;
	slot->end_phys_addr = end_phys_addr;
	/* hand the range to the device through the ram header, then kick
	 * the memslot-add io port */
	ram_header->mem_slot.mem_start = slot->start_phys_addr;
	ram_header->mem_slot.mem_end = slot->end_phys_addr;
	qxl_io_memslot_add(qdev, slot_index);
	/* the generation must be read back after the add io above */
	slot->generation = qdev->rom->slot_generation;
	high_bits = slot_index << qdev->slot_gen_bits;
	high_bits |= slot->generation;
	high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
	slot->high_bits = high_bits;
	return slot_index;
}
97 | |||
98 | static void qxl_gc_work(struct work_struct *work) | ||
99 | { | ||
100 | struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work); | ||
101 | qxl_garbage_collect(qdev); | ||
102 | } | ||
103 | |||
104 | int qxl_device_init(struct qxl_device *qdev, | ||
105 | struct drm_device *ddev, | ||
106 | struct pci_dev *pdev, | ||
107 | unsigned long flags) | ||
108 | { | ||
109 | int r; | ||
110 | |||
111 | qdev->dev = &pdev->dev; | ||
112 | qdev->ddev = ddev; | ||
113 | qdev->pdev = pdev; | ||
114 | qdev->flags = flags; | ||
115 | |||
116 | mutex_init(&qdev->gem.mutex); | ||
117 | mutex_init(&qdev->update_area_mutex); | ||
118 | mutex_init(&qdev->release_mutex); | ||
119 | mutex_init(&qdev->surf_evict_mutex); | ||
120 | INIT_LIST_HEAD(&qdev->gem.objects); | ||
121 | |||
122 | qdev->rom_base = pci_resource_start(pdev, 2); | ||
123 | qdev->rom_size = pci_resource_len(pdev, 2); | ||
124 | qdev->vram_base = pci_resource_start(pdev, 0); | ||
125 | qdev->surfaceram_base = pci_resource_start(pdev, 1); | ||
126 | qdev->surfaceram_size = pci_resource_len(pdev, 1); | ||
127 | qdev->io_base = pci_resource_start(pdev, 3); | ||
128 | |||
129 | qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0)); | ||
130 | qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size); | ||
131 | DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n", | ||
132 | (void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0), | ||
133 | (int)pci_resource_len(pdev, 0) / 1024 / 1024, | ||
134 | (int)pci_resource_len(pdev, 0) / 1024, | ||
135 | (void *)qdev->surfaceram_base, | ||
136 | (void *)pci_resource_end(pdev, 1), | ||
137 | (int)qdev->surfaceram_size / 1024 / 1024, | ||
138 | (int)qdev->surfaceram_size / 1024); | ||
139 | |||
140 | qdev->rom = ioremap(qdev->rom_base, qdev->rom_size); | ||
141 | if (!qdev->rom) { | ||
142 | pr_err("Unable to ioremap ROM\n"); | ||
143 | return -ENOMEM; | ||
144 | } | ||
145 | |||
146 | qxl_check_device(qdev); | ||
147 | |||
148 | r = qxl_bo_init(qdev); | ||
149 | if (r) { | ||
150 | DRM_ERROR("bo init failed %d\n", r); | ||
151 | return r; | ||
152 | } | ||
153 | |||
154 | qdev->ram_header = ioremap(qdev->vram_base + | ||
155 | qdev->rom->ram_header_offset, | ||
156 | sizeof(*qdev->ram_header)); | ||
157 | |||
158 | qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr), | ||
159 | sizeof(struct qxl_command), | ||
160 | QXL_COMMAND_RING_SIZE, | ||
161 | qdev->io_base + QXL_IO_NOTIFY_CMD, | ||
162 | false, | ||
163 | &qdev->display_event); | ||
164 | |||
165 | qdev->cursor_ring = qxl_ring_create( | ||
166 | &(qdev->ram_header->cursor_ring_hdr), | ||
167 | sizeof(struct qxl_command), | ||
168 | QXL_CURSOR_RING_SIZE, | ||
169 | qdev->io_base + QXL_IO_NOTIFY_CMD, | ||
170 | false, | ||
171 | &qdev->cursor_event); | ||
172 | |||
173 | qdev->release_ring = qxl_ring_create( | ||
174 | &(qdev->ram_header->release_ring_hdr), | ||
175 | sizeof(uint64_t), | ||
176 | QXL_RELEASE_RING_SIZE, 0, true, | ||
177 | NULL); | ||
178 | |||
179 | /* TODO - slot initialization should happen on reset. where is our | ||
180 | * reset handler? */ | ||
181 | qdev->n_mem_slots = qdev->rom->slots_end; | ||
182 | qdev->slot_gen_bits = qdev->rom->slot_gen_bits; | ||
183 | qdev->slot_id_bits = qdev->rom->slot_id_bits; | ||
184 | qdev->va_slot_mask = | ||
185 | (~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits); | ||
186 | |||
187 | qdev->mem_slots = | ||
188 | kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot), | ||
189 | GFP_KERNEL); | ||
190 | |||
191 | idr_init(&qdev->release_idr); | ||
192 | spin_lock_init(&qdev->release_idr_lock); | ||
193 | |||
194 | idr_init(&qdev->surf_id_idr); | ||
195 | spin_lock_init(&qdev->surf_id_idr_lock); | ||
196 | |||
197 | mutex_init(&qdev->async_io_mutex); | ||
198 | |||
199 | /* reset the device into a known state - no memslots, no primary | ||
200 | * created, no surfaces. */ | ||
201 | qxl_io_reset(qdev); | ||
202 | |||
203 | /* must initialize irq before first async io - slot creation */ | ||
204 | r = qxl_irq_init(qdev); | ||
205 | if (r) | ||
206 | return r; | ||
207 | |||
208 | /* | ||
209 | * Note that virtual is surface0. We rely on the single ioremap done | ||
210 | * before. | ||
211 | */ | ||
212 | qdev->main_mem_slot = setup_slot(qdev, 0, | ||
213 | (unsigned long)qdev->vram_base, | ||
214 | (unsigned long)qdev->vram_base + qdev->rom->ram_header_offset); | ||
215 | qdev->surfaces_mem_slot = setup_slot(qdev, 1, | ||
216 | (unsigned long)qdev->surfaceram_base, | ||
217 | (unsigned long)qdev->surfaceram_base + qdev->surfaceram_size); | ||
218 | DRM_INFO("main mem slot %d [%lx,%x)\n", | ||
219 | qdev->main_mem_slot, | ||
220 | (unsigned long)qdev->vram_base, qdev->rom->ram_header_offset); | ||
221 | |||
222 | |||
223 | qdev->gc_queue = create_singlethread_workqueue("qxl_gc"); | ||
224 | INIT_WORK(&qdev->gc_work, qxl_gc_work); | ||
225 | |||
226 | r = qxl_fb_init(qdev); | ||
227 | if (r) | ||
228 | return r; | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
/*
 * Tear down everything qxl_device_init() set up, in reverse order:
 * cached release bos, the gc workqueue, the three rings, the bo/ttm
 * layer and finally the io mappings.
 */
void qxl_device_fini(struct qxl_device *qdev)
{
	/* drop any partially-filled release bos still cached */
	if (qdev->current_release_bo[0])
		qxl_bo_unref(&qdev->current_release_bo[0]);
	if (qdev->current_release_bo[1])
		qxl_bo_unref(&qdev->current_release_bo[1]);
	/* drain outstanding gc work before destroying the queue */
	flush_workqueue(qdev->gc_queue);
	destroy_workqueue(qdev->gc_queue);
	qdev->gc_queue = NULL;

	qxl_ring_free(qdev->command_ring);
	qxl_ring_free(qdev->cursor_ring);
	qxl_ring_free(qdev->release_ring);
	qxl_bo_fini(qdev);
	io_mapping_free(qdev->surface_mapping);
	io_mapping_free(qdev->vram_mapping);
	iounmap(qdev->ram_header);
	iounmap(qdev->rom);
	qdev->rom = NULL;
	/* the mode list pointed into the rom mapping; invalidate it */
	qdev->mode_info.modes = NULL;
	qdev->mode_info.num_modes = 0;
	qxl_debugfs_remove_files(qdev);
}
256 | |||
257 | int qxl_driver_unload(struct drm_device *dev) | ||
258 | { | ||
259 | struct qxl_device *qdev = dev->dev_private; | ||
260 | |||
261 | if (qdev == NULL) | ||
262 | return 0; | ||
263 | qxl_modeset_fini(qdev); | ||
264 | qxl_device_fini(qdev); | ||
265 | |||
266 | kfree(qdev); | ||
267 | dev->dev_private = NULL; | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | int qxl_driver_load(struct drm_device *dev, unsigned long flags) | ||
272 | { | ||
273 | struct qxl_device *qdev; | ||
274 | int r; | ||
275 | |||
276 | /* require kms */ | ||
277 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) | ||
278 | return -ENODEV; | ||
279 | |||
280 | qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL); | ||
281 | if (qdev == NULL) | ||
282 | return -ENOMEM; | ||
283 | |||
284 | dev->dev_private = qdev; | ||
285 | |||
286 | r = qxl_device_init(qdev, dev, dev->pdev, flags); | ||
287 | if (r) | ||
288 | goto out; | ||
289 | |||
290 | r = qxl_modeset_init(qdev); | ||
291 | if (r) { | ||
292 | qxl_driver_unload(dev); | ||
293 | goto out; | ||
294 | } | ||
295 | |||
296 | return 0; | ||
297 | out: | ||
298 | kfree(qdev); | ||
299 | return r; | ||
300 | } | ||
301 | |||
302 | |||
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c new file mode 100644 index 000000000000..51efb94a5dee --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_object.c | |||
@@ -0,0 +1,365 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | #include "qxl_drv.h" | ||
27 | #include "qxl_object.h" | ||
28 | |||
29 | #include <linux/io-mapping.h> | ||
/*
 * TTM destroy callback: final teardown once the last reference to the
 * buffer object is gone.
 */
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = container_of(tbo, struct qxl_bo, tbo);
	qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;

	/* release any hw surface backing before the memory goes away */
	qxl_surface_evict(qdev, bo, false);
	qxl_fence_fini(&bo->fence);
	/* unlink from the device-wide object list under the gem mutex */
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}
46 | |||
47 | bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) | ||
48 | { | ||
49 | if (bo->destroy == &qxl_ttm_bo_destroy) | ||
50 | return true; | ||
51 | return false; | ||
52 | } | ||
53 | |||
54 | void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) | ||
55 | { | ||
56 | u32 c = 0; | ||
57 | |||
58 | qbo->placement.fpfn = 0; | ||
59 | qbo->placement.lpfn = 0; | ||
60 | qbo->placement.placement = qbo->placements; | ||
61 | qbo->placement.busy_placement = qbo->placements; | ||
62 | if (domain & QXL_GEM_DOMAIN_VRAM) | ||
63 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; | ||
64 | if (domain & QXL_GEM_DOMAIN_SURFACE) | ||
65 | qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; | ||
66 | if (domain & QXL_GEM_DOMAIN_CPU) | ||
67 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | ||
68 | if (!c) | ||
69 | qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | ||
70 | qbo->placement.num_placement = c; | ||
71 | qbo->placement.num_busy_placement = c; | ||
72 | } | ||
73 | |||
74 | |||
/*
 * Allocate and initialize a qxl buffer object.
 *
 * @size is rounded up to a whole page.  @kernel selects a kernel-only
 * TTM object (not mappable by userspace).  @domain is a
 * QXL_GEM_DOMAIN_* placement mask; @surf, when non-NULL, is copied in
 * as the surface parameters.  On success *bo_ptr owns one reference;
 * on error it is left NULL.
 */
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	/* lazily mirror the drm device's address space into the ttm bdev */
	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->gem_base.driver_private = NULL;
	bo->type = domain;
	bo->pin_count = 0;
	bo->surface_id = 0;
	qxl_fence_init(qdev, &bo->fence);
	INIT_LIST_HEAD(&bo->list);
	atomic_set(&bo->reserve_count, 0);
	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain);

	/* NOTE(review): on failure ttm_bo_init is expected to invoke the
	 * destroy callback (freeing bo), so no kfree here — confirm
	 * against the TTM API of this kernel version */
	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, NULL, size,
			NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}
125 | |||
126 | int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) | ||
127 | { | ||
128 | bool is_iomem; | ||
129 | int r; | ||
130 | |||
131 | if (bo->kptr) { | ||
132 | if (ptr) | ||
133 | *ptr = bo->kptr; | ||
134 | return 0; | ||
135 | } | ||
136 | r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); | ||
137 | if (r) | ||
138 | return r; | ||
139 | bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); | ||
140 | if (ptr) | ||
141 | *ptr = bo->kptr; | ||
142 | return 0; | ||
143 | } | ||
144 | |||
/*
 * Map a single page of @bo for short, atomic access.
 *
 * VRAM / surface (PRIV0) placements are served from the per-BAR io
 * mappings and must be released with qxl_bo_kunmap_atomic_page().
 * Any other placement falls back to the whole-object kmap.
 * Returns NULL only when the fallback kmap fails.
 */
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
		map = qdev->surface_mapping;
	else
		goto fallback;

	(void) ttm_mem_io_lock(man, false);
	/* NOTE(review): the reserve result is ignored; bus.offset could be
	 * stale if ttm_mem_io_reserve() failed — worth confirming */
	ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
	ttm_mem_io_unlock(man);

	return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
	/* reuse an existing whole-object kernel mapping when present */
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}
178 | |||
179 | void qxl_bo_kunmap(struct qxl_bo *bo) | ||
180 | { | ||
181 | if (bo->kptr == NULL) | ||
182 | return; | ||
183 | bo->kptr = NULL; | ||
184 | ttm_bo_kunmap(&bo->kmap); | ||
185 | } | ||
186 | |||
187 | void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, | ||
188 | struct qxl_bo *bo, void *pmap) | ||
189 | { | ||
190 | struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; | ||
191 | struct io_mapping *map; | ||
192 | |||
193 | if (bo->tbo.mem.mem_type == TTM_PL_VRAM) | ||
194 | map = qdev->vram_mapping; | ||
195 | else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0) | ||
196 | map = qdev->surface_mapping; | ||
197 | else | ||
198 | goto fallback; | ||
199 | |||
200 | io_mapping_unmap_atomic(pmap); | ||
201 | |||
202 | (void) ttm_mem_io_lock(man, false); | ||
203 | ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem); | ||
204 | ttm_mem_io_unlock(man); | ||
205 | return ; | ||
206 | fallback: | ||
207 | qxl_bo_kunmap(bo); | ||
208 | } | ||
209 | |||
210 | void qxl_bo_unref(struct qxl_bo **bo) | ||
211 | { | ||
212 | struct ttm_buffer_object *tbo; | ||
213 | |||
214 | if ((*bo) == NULL) | ||
215 | return; | ||
216 | tbo = &((*bo)->tbo); | ||
217 | ttm_bo_unref(&tbo); | ||
218 | if (tbo == NULL) | ||
219 | *bo = NULL; | ||
220 | } | ||
221 | |||
222 | struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) | ||
223 | { | ||
224 | ttm_bo_reference(&bo->tbo); | ||
225 | return bo; | ||
226 | } | ||
227 | |||
228 | int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) | ||
229 | { | ||
230 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; | ||
231 | int r, i; | ||
232 | |||
233 | if (bo->pin_count) { | ||
234 | bo->pin_count++; | ||
235 | if (gpu_addr) | ||
236 | *gpu_addr = qxl_bo_gpu_offset(bo); | ||
237 | return 0; | ||
238 | } | ||
239 | qxl_ttm_placement_from_domain(bo, domain); | ||
240 | for (i = 0; i < bo->placement.num_placement; i++) | ||
241 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; | ||
242 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
243 | if (likely(r == 0)) { | ||
244 | bo->pin_count = 1; | ||
245 | if (gpu_addr != NULL) | ||
246 | *gpu_addr = qxl_bo_gpu_offset(bo); | ||
247 | } | ||
248 | if (unlikely(r != 0)) | ||
249 | dev_err(qdev->dev, "%p pin failed\n", bo); | ||
250 | return r; | ||
251 | } | ||
252 | |||
253 | int qxl_bo_unpin(struct qxl_bo *bo) | ||
254 | { | ||
255 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; | ||
256 | int r, i; | ||
257 | |||
258 | if (!bo->pin_count) { | ||
259 | dev_warn(qdev->dev, "%p unpin not necessary\n", bo); | ||
260 | return 0; | ||
261 | } | ||
262 | bo->pin_count--; | ||
263 | if (bo->pin_count) | ||
264 | return 0; | ||
265 | for (i = 0; i < bo->placement.num_placement; i++) | ||
266 | bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; | ||
267 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); | ||
268 | if (unlikely(r != 0)) | ||
269 | dev_err(qdev->dev, "%p validate failed for unpin\n", bo); | ||
270 | return r; | ||
271 | } | ||
272 | |||
/*
 * Called at teardown: forcibly release gem objects userspace leaked.
 * A non-empty list here is itself a bug, hence the dev_err noise.
 */
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		/* struct_mutex taken outside gem.mutex for the unreference */
		mutex_lock(&qdev->ddev->struct_mutex);
		dev_err(qdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&qdev->ddev->struct_mutex);
	}
}
293 | |||
/* Set up the TTM memory manager backing all qxl buffer objects. */
int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

/* Tear down the TTM memory manager; inverse of qxl_bo_init(). */
void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}
303 | |||
304 | int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) | ||
305 | { | ||
306 | int ret; | ||
307 | if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) { | ||
308 | /* allocate a surface id for this surface now */ | ||
309 | ret = qxl_surface_id_alloc(qdev, bo); | ||
310 | if (ret) | ||
311 | return ret; | ||
312 | |||
313 | ret = qxl_hw_surface_alloc(qdev, bo, NULL); | ||
314 | if (ret) | ||
315 | return ret; | ||
316 | } | ||
317 | return 0; | ||
318 | } | ||
319 | |||
320 | void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed) | ||
321 | { | ||
322 | struct qxl_bo_list *entry, *sf; | ||
323 | |||
324 | list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) { | ||
325 | qxl_bo_unreserve(entry->bo); | ||
326 | list_del(&entry->lhead); | ||
327 | kfree(entry); | ||
328 | } | ||
329 | } | ||
330 | |||
331 | int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo) | ||
332 | { | ||
333 | struct qxl_bo_list *entry; | ||
334 | int ret; | ||
335 | |||
336 | list_for_each_entry(entry, &reloc_list->bos, lhead) { | ||
337 | if (entry->bo == bo) | ||
338 | return 0; | ||
339 | } | ||
340 | |||
341 | entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); | ||
342 | if (!entry) | ||
343 | return -ENOMEM; | ||
344 | |||
345 | entry->bo = bo; | ||
346 | list_add(&entry->lhead, &reloc_list->bos); | ||
347 | |||
348 | ret = qxl_bo_reserve(bo, false); | ||
349 | if (ret) | ||
350 | return ret; | ||
351 | |||
352 | if (!bo->pin_count) { | ||
353 | qxl_ttm_placement_from_domain(bo, bo->type); | ||
354 | ret = ttm_bo_validate(&bo->tbo, &bo->placement, | ||
355 | true, false); | ||
356 | if (ret) | ||
357 | return ret; | ||
358 | } | ||
359 | |||
360 | /* allocate a surface for reserved + validated buffers */ | ||
361 | ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); | ||
362 | if (ret) | ||
363 | return ret; | ||
364 | return 0; | ||
365 | } | ||
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h new file mode 100644 index 000000000000..b4fd89fbd8b7 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_object.h | |||
@@ -0,0 +1,112 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | #ifndef QXL_OBJECT_H | ||
26 | #define QXL_OBJECT_H | ||
27 | |||
28 | #include "qxl_drv.h" | ||
29 | |||
30 | static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait) | ||
31 | { | ||
32 | int r; | ||
33 | |||
34 | r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); | ||
35 | if (unlikely(r != 0)) { | ||
36 | if (r != -ERESTARTSYS) { | ||
37 | struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; | ||
38 | dev_err(qdev->dev, "%p reserve failed\n", bo); | ||
39 | } | ||
40 | return r; | ||
41 | } | ||
42 | return 0; | ||
43 | } | ||
44 | |||
/* Release a reservation taken with qxl_bo_reserve(). */
static inline void qxl_bo_unreserve(struct qxl_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

/* Current GPU offset of the buffer within its TTM placement. */
static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
{
	return bo->tbo.offset;
}

/* Object size in bytes (always page-granular). */
static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

/* True while the underlying TTM object is reserved. */
static inline bool qxl_bo_is_reserved(struct qxl_bo *bo)
{
	return !!atomic_read(&bo->tbo.reserved);
}

/* Offset userspace passes to mmap() to map this object. */
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
{
	return bo->tbo.addr_space_offset;
}
69 | |||
/*
 * Wait (interruptibly) for any fence attached to @bo to signal.
 *
 * Reserves the object, samples its current memory type into *@mem_type
 * (when non-NULL) under the bdev fence lock, then waits on the sync
 * object if one is attached.  @no_wait makes both the reserve and the
 * wait non-blocking.  Returns 0 or a negative errno.
 */
static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
			      bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
			dev_err(qdev->dev, "%p reserve failed for wait\n",
				bo);
		}
		return r;
	}
	/* fence_lock guards both the mem_type sample and the sync_obj peek */
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}
93 | |||
94 | extern int qxl_bo_create(struct qxl_device *qdev, | ||
95 | unsigned long size, | ||
96 | bool kernel, u32 domain, | ||
97 | struct qxl_surface *surf, | ||
98 | struct qxl_bo **bo_ptr); | ||
99 | extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); | ||
100 | extern void qxl_bo_kunmap(struct qxl_bo *bo); | ||
101 | void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset); | ||
102 | void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map); | ||
103 | extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); | ||
104 | extern void qxl_bo_unref(struct qxl_bo **bo); | ||
105 | extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); | ||
106 | extern int qxl_bo_unpin(struct qxl_bo *bo); | ||
107 | extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); | ||
108 | extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); | ||
109 | |||
110 | extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo); | ||
111 | extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed); | ||
112 | #endif | ||
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c new file mode 100644 index 000000000000..1600781d8cbc --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_release.c | |||
@@ -0,0 +1,307 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat, Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * on the rights to use, copy, modify, merge, publish, distribute, sub | ||
8 | * license, and/or sell copies of the Software, and to permit persons to whom | ||
9 | * the Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the next | ||
12 | * paragraph) shall be included in all copies or substantial portions of the | ||
13 | * Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER | ||
19 | * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
20 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
21 | */ | ||
22 | #include "qxl_drv.h" | ||
23 | #include "qxl_object.h" | ||
24 | |||
25 | /* | ||
26 | * drawable cmd cache - allocate a bunch of VRAM pages, suballocate | ||
27 | * into 256 byte chunks for now - gives 16 cmds per page. | ||
28 | * | ||
29 | * use an ida to index into the chunks? | ||
30 | */ | ||
31 | /* manage releaseables */ | ||
32 | /* stack them 16 high for now -drawable object is 191 */ | ||
33 | #define RELEASE_SIZE 256 | ||
34 | #define RELEASES_PER_BO (4096 / RELEASE_SIZE) | ||
35 | /* put an alloc/dealloc surface cmd into one bo and round up to 128 */ | ||
36 | #define SURFACE_RELEASE_SIZE 128 | ||
37 | #define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE) | ||
38 | |||
39 | static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; | ||
40 | static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; | ||
41 | uint64_t | ||
42 | qxl_release_alloc(struct qxl_device *qdev, int type, | ||
43 | struct qxl_release **ret) | ||
44 | { | ||
45 | struct qxl_release *release; | ||
46 | int handle = 0; | ||
47 | size_t size = sizeof(*release); | ||
48 | int idr_ret; | ||
49 | |||
50 | release = kmalloc(size, GFP_KERNEL); | ||
51 | if (!release) { | ||
52 | DRM_ERROR("Out of memory\n"); | ||
53 | return 0; | ||
54 | } | ||
55 | release->type = type; | ||
56 | release->bo_count = 0; | ||
57 | release->release_offset = 0; | ||
58 | release->surface_release_id = 0; | ||
59 | again: | ||
60 | if (idr_pre_get(&qdev->release_idr, GFP_KERNEL) == 0) { | ||
61 | DRM_ERROR("Out of memory for release idr\n"); | ||
62 | kfree(release); | ||
63 | goto release_fail; | ||
64 | } | ||
65 | spin_lock(&qdev->release_idr_lock); | ||
66 | idr_ret = idr_get_new_above(&qdev->release_idr, release, 1, &handle); | ||
67 | spin_unlock(&qdev->release_idr_lock); | ||
68 | if (idr_ret == -EAGAIN) | ||
69 | goto again; | ||
70 | if (ret) | ||
71 | *ret = release; | ||
72 | QXL_INFO(qdev, "allocated release %lld\n", handle); | ||
73 | release->id = handle; | ||
74 | release_fail: | ||
75 | |||
76 | return handle; | ||
77 | } | ||
78 | |||
79 | void | ||
80 | qxl_release_free(struct qxl_device *qdev, | ||
81 | struct qxl_release *release) | ||
82 | { | ||
83 | int i; | ||
84 | |||
85 | QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, | ||
86 | release->type, release->bo_count); | ||
87 | |||
88 | if (release->surface_release_id) | ||
89 | qxl_surface_id_dealloc(qdev, release->surface_release_id); | ||
90 | |||
91 | for (i = 0 ; i < release->bo_count; ++i) { | ||
92 | QXL_INFO(qdev, "release %llx\n", | ||
93 | release->bos[i]->tbo.addr_space_offset | ||
94 | - DRM_FILE_OFFSET); | ||
95 | qxl_fence_remove_release(&release->bos[i]->fence, release->id); | ||
96 | qxl_bo_unref(&release->bos[i]); | ||
97 | } | ||
98 | spin_lock(&qdev->release_idr_lock); | ||
99 | idr_remove(&qdev->release_idr, release->id); | ||
100 | spin_unlock(&qdev->release_idr_lock); | ||
101 | kfree(release); | ||
102 | } | ||
103 | |||
104 | void | ||
105 | qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release, | ||
106 | struct qxl_bo *bo) | ||
107 | { | ||
108 | int i; | ||
109 | for (i = 0; i < release->bo_count; i++) | ||
110 | if (release->bos[i] == bo) | ||
111 | return; | ||
112 | |||
113 | if (release->bo_count >= QXL_MAX_RES) { | ||
114 | DRM_ERROR("exceeded max resource on a qxl_release item\n"); | ||
115 | return; | ||
116 | } | ||
117 | release->bos[release->bo_count++] = qxl_bo_ref(bo); | ||
118 | } | ||
119 | |||
120 | int qxl_release_bo_alloc(struct qxl_device *qdev, | ||
121 | struct qxl_bo **bo) | ||
122 | { | ||
123 | int ret; | ||
124 | ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, | ||
125 | bo); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | int qxl_release_reserve(struct qxl_device *qdev, | ||
130 | struct qxl_release *release, bool no_wait) | ||
131 | { | ||
132 | int ret; | ||
133 | if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { | ||
134 | ret = qxl_bo_reserve(release->bos[0], no_wait); | ||
135 | if (ret) | ||
136 | return ret; | ||
137 | } | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | void qxl_release_unreserve(struct qxl_device *qdev, | ||
142 | struct qxl_release *release) | ||
143 | { | ||
144 | if (atomic_dec_and_test(&release->bos[0]->reserve_count)) | ||
145 | qxl_bo_unreserve(release->bos[0]); | ||
146 | } | ||
147 | |||
148 | int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, | ||
149 | enum qxl_surface_cmd_type surface_cmd_type, | ||
150 | struct qxl_release *create_rel, | ||
151 | struct qxl_release **release) | ||
152 | { | ||
153 | int ret; | ||
154 | |||
155 | if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { | ||
156 | int idr_ret; | ||
157 | struct qxl_bo *bo; | ||
158 | union qxl_release_info *info; | ||
159 | |||
160 | /* stash the release after the create command */ | ||
161 | idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); | ||
162 | bo = qxl_bo_ref(create_rel->bos[0]); | ||
163 | |||
164 | (*release)->release_offset = create_rel->release_offset + 64; | ||
165 | |||
166 | qxl_release_add_res(qdev, *release, bo); | ||
167 | |||
168 | ret = qxl_release_reserve(qdev, *release, false); | ||
169 | if (ret) { | ||
170 | DRM_ERROR("release reserve failed\n"); | ||
171 | goto out_unref; | ||
172 | } | ||
173 | info = qxl_release_map(qdev, *release); | ||
174 | info->id = idr_ret; | ||
175 | qxl_release_unmap(qdev, *release, info); | ||
176 | |||
177 | |||
178 | out_unref: | ||
179 | qxl_bo_unref(&bo); | ||
180 | return ret; | ||
181 | } | ||
182 | |||
183 | return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), | ||
184 | QXL_RELEASE_SURFACE_CMD, release, NULL); | ||
185 | } | ||
186 | |||
187 | int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, | ||
188 | int type, struct qxl_release **release, | ||
189 | struct qxl_bo **rbo) | ||
190 | { | ||
191 | struct qxl_bo *bo; | ||
192 | int idr_ret; | ||
193 | int ret; | ||
194 | union qxl_release_info *info; | ||
195 | int cur_idx; | ||
196 | |||
197 | if (type == QXL_RELEASE_DRAWABLE) | ||
198 | cur_idx = 0; | ||
199 | else if (type == QXL_RELEASE_SURFACE_CMD) | ||
200 | cur_idx = 1; | ||
201 | else if (type == QXL_RELEASE_CURSOR_CMD) | ||
202 | cur_idx = 2; | ||
203 | else { | ||
204 | DRM_ERROR("got illegal type: %d\n", type); | ||
205 | return -EINVAL; | ||
206 | } | ||
207 | |||
208 | idr_ret = qxl_release_alloc(qdev, type, release); | ||
209 | |||
210 | mutex_lock(&qdev->release_mutex); | ||
211 | if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) { | ||
212 | qxl_bo_unref(&qdev->current_release_bo[cur_idx]); | ||
213 | qdev->current_release_bo_offset[cur_idx] = 0; | ||
214 | qdev->current_release_bo[cur_idx] = NULL; | ||
215 | } | ||
216 | if (!qdev->current_release_bo[cur_idx]) { | ||
217 | ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]); | ||
218 | if (ret) { | ||
219 | mutex_unlock(&qdev->release_mutex); | ||
220 | return ret; | ||
221 | } | ||
222 | |||
223 | /* pin releases bo's they are too messy to evict */ | ||
224 | ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false); | ||
225 | qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL); | ||
226 | qxl_bo_unreserve(qdev->current_release_bo[cur_idx]); | ||
227 | } | ||
228 | |||
229 | bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); | ||
230 | |||
231 | (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx]; | ||
232 | qdev->current_release_bo_offset[cur_idx]++; | ||
233 | |||
234 | if (rbo) | ||
235 | *rbo = bo; | ||
236 | |||
237 | qxl_release_add_res(qdev, *release, bo); | ||
238 | |||
239 | ret = qxl_release_reserve(qdev, *release, false); | ||
240 | mutex_unlock(&qdev->release_mutex); | ||
241 | if (ret) | ||
242 | goto out_unref; | ||
243 | |||
244 | info = qxl_release_map(qdev, *release); | ||
245 | info->id = idr_ret; | ||
246 | qxl_release_unmap(qdev, *release, info); | ||
247 | |||
248 | out_unref: | ||
249 | qxl_bo_unref(&bo); | ||
250 | return ret; | ||
251 | } | ||
252 | |||
253 | int qxl_fence_releaseable(struct qxl_device *qdev, | ||
254 | struct qxl_release *release) | ||
255 | { | ||
256 | int i, ret; | ||
257 | for (i = 0; i < release->bo_count; i++) { | ||
258 | if (!release->bos[i]->tbo.sync_obj) | ||
259 | release->bos[i]->tbo.sync_obj = &release->bos[i]->fence; | ||
260 | ret = qxl_fence_add_release(&release->bos[i]->fence, release->id); | ||
261 | if (ret) | ||
262 | return ret; | ||
263 | } | ||
264 | return 0; | ||
265 | } | ||
266 | |||
267 | struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, | ||
268 | uint64_t id) | ||
269 | { | ||
270 | struct qxl_release *release; | ||
271 | |||
272 | spin_lock(&qdev->release_idr_lock); | ||
273 | release = idr_find(&qdev->release_idr, id); | ||
274 | spin_unlock(&qdev->release_idr_lock); | ||
275 | if (!release) { | ||
276 | DRM_ERROR("failed to find id in release_idr\n"); | ||
277 | return NULL; | ||
278 | } | ||
279 | if (release->bo_count < 1) { | ||
280 | DRM_ERROR("read a released resource with 0 bos\n"); | ||
281 | return NULL; | ||
282 | } | ||
283 | return release; | ||
284 | } | ||
285 | |||
286 | union qxl_release_info *qxl_release_map(struct qxl_device *qdev, | ||
287 | struct qxl_release *release) | ||
288 | { | ||
289 | void *ptr; | ||
290 | union qxl_release_info *info; | ||
291 | struct qxl_bo *bo = release->bos[0]; | ||
292 | |||
293 | ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); | ||
294 | info = ptr + (release->release_offset & ~PAGE_SIZE); | ||
295 | return info; | ||
296 | } | ||
297 | |||
298 | void qxl_release_unmap(struct qxl_device *qdev, | ||
299 | struct qxl_release *release, | ||
300 | union qxl_release_info *info) | ||
301 | { | ||
302 | struct qxl_bo *bo = release->bos[0]; | ||
303 | void *ptr; | ||
304 | |||
305 | ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); | ||
306 | qxl_bo_kunmap_atomic_page(qdev, bo, ptr); | ||
307 | } | ||
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c new file mode 100644 index 000000000000..aa9fb9afca0b --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_ttm.c | |||
@@ -0,0 +1,577 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat Inc. | ||
3 | * | ||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice shall be included in | ||
12 | * all copies or substantial portions of the Software. | ||
13 | * | ||
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
21 | * | ||
22 | * Authors: Dave Airlie | ||
23 | * Alon Levy | ||
24 | */ | ||
25 | |||
26 | #include <ttm/ttm_bo_api.h> | ||
27 | #include <ttm/ttm_bo_driver.h> | ||
28 | #include <ttm/ttm_placement.h> | ||
29 | #include <ttm/ttm_page_alloc.h> | ||
30 | #include <ttm/ttm_module.h> | ||
31 | #include <drm/drmP.h> | ||
32 | #include <drm/drm.h> | ||
33 | #include <drm/qxl_drm.h> | ||
34 | #include "qxl_drv.h" | ||
35 | #include "qxl_object.h" | ||
36 | |||
37 | #include <linux/delay.h> | ||
38 | static int qxl_ttm_debugfs_init(struct qxl_device *qdev); | ||
39 | |||
40 | static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev) | ||
41 | { | ||
42 | struct qxl_mman *mman; | ||
43 | struct qxl_device *qdev; | ||
44 | |||
45 | mman = container_of(bdev, struct qxl_mman, bdev); | ||
46 | qdev = container_of(mman, struct qxl_device, mman); | ||
47 | return qdev; | ||
48 | } | ||
49 | |||
50 | static int qxl_ttm_mem_global_init(struct drm_global_reference *ref) | ||
51 | { | ||
52 | return ttm_mem_global_init(ref->object); | ||
53 | } | ||
54 | |||
55 | static void qxl_ttm_mem_global_release(struct drm_global_reference *ref) | ||
56 | { | ||
57 | ttm_mem_global_release(ref->object); | ||
58 | } | ||
59 | |||
60 | static int qxl_ttm_global_init(struct qxl_device *qdev) | ||
61 | { | ||
62 | struct drm_global_reference *global_ref; | ||
63 | int r; | ||
64 | |||
65 | qdev->mman.mem_global_referenced = false; | ||
66 | global_ref = &qdev->mman.mem_global_ref; | ||
67 | global_ref->global_type = DRM_GLOBAL_TTM_MEM; | ||
68 | global_ref->size = sizeof(struct ttm_mem_global); | ||
69 | global_ref->init = &qxl_ttm_mem_global_init; | ||
70 | global_ref->release = &qxl_ttm_mem_global_release; | ||
71 | |||
72 | r = drm_global_item_ref(global_ref); | ||
73 | if (r != 0) { | ||
74 | DRM_ERROR("Failed setting up TTM memory accounting " | ||
75 | "subsystem.\n"); | ||
76 | return r; | ||
77 | } | ||
78 | |||
79 | qdev->mman.bo_global_ref.mem_glob = | ||
80 | qdev->mman.mem_global_ref.object; | ||
81 | global_ref = &qdev->mman.bo_global_ref.ref; | ||
82 | global_ref->global_type = DRM_GLOBAL_TTM_BO; | ||
83 | global_ref->size = sizeof(struct ttm_bo_global); | ||
84 | global_ref->init = &ttm_bo_global_init; | ||
85 | global_ref->release = &ttm_bo_global_release; | ||
86 | r = drm_global_item_ref(global_ref); | ||
87 | if (r != 0) { | ||
88 | DRM_ERROR("Failed setting up TTM BO subsystem.\n"); | ||
89 | drm_global_item_unref(&qdev->mman.mem_global_ref); | ||
90 | return r; | ||
91 | } | ||
92 | |||
93 | qdev->mman.mem_global_referenced = true; | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static void qxl_ttm_global_fini(struct qxl_device *qdev) | ||
98 | { | ||
99 | if (qdev->mman.mem_global_referenced) { | ||
100 | drm_global_item_unref(&qdev->mman.bo_global_ref.ref); | ||
101 | drm_global_item_unref(&qdev->mman.mem_global_ref); | ||
102 | qdev->mman.mem_global_referenced = false; | ||
103 | } | ||
104 | } | ||
105 | |||
106 | static struct vm_operations_struct qxl_ttm_vm_ops; | ||
107 | static const struct vm_operations_struct *ttm_vm_ops; | ||
108 | |||
109 | static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
110 | { | ||
111 | struct ttm_buffer_object *bo; | ||
112 | struct qxl_device *qdev; | ||
113 | int r; | ||
114 | |||
115 | bo = (struct ttm_buffer_object *)vma->vm_private_data; | ||
116 | if (bo == NULL) | ||
117 | return VM_FAULT_NOPAGE; | ||
118 | qdev = qxl_get_qdev(bo->bdev); | ||
119 | r = ttm_vm_ops->fault(vma, vmf); | ||
120 | return r; | ||
121 | } | ||
122 | |||
123 | int qxl_mmap(struct file *filp, struct vm_area_struct *vma) | ||
124 | { | ||
125 | struct drm_file *file_priv; | ||
126 | struct qxl_device *qdev; | ||
127 | int r; | ||
128 | |||
129 | if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) { | ||
130 | pr_info("%s: vma->vm_pgoff (%ld) < DRM_FILE_PAGE_OFFSET\n", | ||
131 | __func__, vma->vm_pgoff); | ||
132 | return drm_mmap(filp, vma); | ||
133 | } | ||
134 | |||
135 | file_priv = filp->private_data; | ||
136 | qdev = file_priv->minor->dev->dev_private; | ||
137 | if (qdev == NULL) { | ||
138 | DRM_ERROR( | ||
139 | "filp->private_data->minor->dev->dev_private == NULL\n"); | ||
140 | return -EINVAL; | ||
141 | } | ||
142 | QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n", | ||
143 | __func__, filp->private_data, vma->vm_pgoff); | ||
144 | |||
145 | r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev); | ||
146 | if (unlikely(r != 0)) | ||
147 | return r; | ||
148 | if (unlikely(ttm_vm_ops == NULL)) { | ||
149 | ttm_vm_ops = vma->vm_ops; | ||
150 | qxl_ttm_vm_ops = *ttm_vm_ops; | ||
151 | qxl_ttm_vm_ops.fault = &qxl_ttm_fault; | ||
152 | } | ||
153 | vma->vm_ops = &qxl_ttm_vm_ops; | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) | ||
158 | { | ||
159 | return 0; | ||
160 | } | ||
161 | |||
162 | static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, | ||
163 | struct ttm_mem_type_manager *man) | ||
164 | { | ||
165 | struct qxl_device *qdev; | ||
166 | |||
167 | qdev = qxl_get_qdev(bdev); | ||
168 | |||
169 | switch (type) { | ||
170 | case TTM_PL_SYSTEM: | ||
171 | /* System memory */ | ||
172 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; | ||
173 | man->available_caching = TTM_PL_MASK_CACHING; | ||
174 | man->default_caching = TTM_PL_FLAG_CACHED; | ||
175 | break; | ||
176 | case TTM_PL_VRAM: | ||
177 | case TTM_PL_PRIV0: | ||
178 | /* "On-card" video ram */ | ||
179 | man->func = &ttm_bo_manager_func; | ||
180 | man->gpu_offset = 0; | ||
181 | man->flags = TTM_MEMTYPE_FLAG_FIXED | | ||
182 | TTM_MEMTYPE_FLAG_MAPPABLE; | ||
183 | man->available_caching = TTM_PL_MASK_CACHING; | ||
184 | man->default_caching = TTM_PL_FLAG_CACHED; | ||
185 | break; | ||
186 | default: | ||
187 | DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); | ||
188 | return -EINVAL; | ||
189 | } | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | static void qxl_evict_flags(struct ttm_buffer_object *bo, | ||
194 | struct ttm_placement *placement) | ||
195 | { | ||
196 | struct qxl_bo *qbo; | ||
197 | static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; | ||
198 | |||
199 | if (!qxl_ttm_bo_is_qxl_bo(bo)) { | ||
200 | placement->fpfn = 0; | ||
201 | placement->lpfn = 0; | ||
202 | placement->placement = &placements; | ||
203 | placement->busy_placement = &placements; | ||
204 | placement->num_placement = 1; | ||
205 | placement->num_busy_placement = 1; | ||
206 | return; | ||
207 | } | ||
208 | qbo = container_of(bo, struct qxl_bo, tbo); | ||
209 | qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); | ||
210 | *placement = qbo->placement; | ||
211 | } | ||
212 | |||
213 | static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp) | ||
214 | { | ||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, | ||
219 | struct ttm_mem_reg *mem) | ||
220 | { | ||
221 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; | ||
222 | struct qxl_device *qdev = qxl_get_qdev(bdev); | ||
223 | |||
224 | mem->bus.addr = NULL; | ||
225 | mem->bus.offset = 0; | ||
226 | mem->bus.size = mem->num_pages << PAGE_SHIFT; | ||
227 | mem->bus.base = 0; | ||
228 | mem->bus.is_iomem = false; | ||
229 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) | ||
230 | return -EINVAL; | ||
231 | switch (mem->mem_type) { | ||
232 | case TTM_PL_SYSTEM: | ||
233 | /* system memory */ | ||
234 | return 0; | ||
235 | case TTM_PL_VRAM: | ||
236 | mem->bus.is_iomem = true; | ||
237 | mem->bus.base = qdev->vram_base; | ||
238 | mem->bus.offset = mem->start << PAGE_SHIFT; | ||
239 | break; | ||
240 | case TTM_PL_PRIV0: | ||
241 | mem->bus.is_iomem = true; | ||
242 | mem->bus.base = qdev->surfaceram_base; | ||
243 | mem->bus.offset = mem->start << PAGE_SHIFT; | ||
244 | break; | ||
245 | default: | ||
246 | return -EINVAL; | ||
247 | } | ||
248 | return 0; | ||
249 | } | ||
250 | |||
251 | static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev, | ||
252 | struct ttm_mem_reg *mem) | ||
253 | { | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * TTM backend functions. | ||
258 | */ | ||
259 | struct qxl_ttm_tt { | ||
260 | struct ttm_dma_tt ttm; | ||
261 | struct qxl_device *qdev; | ||
262 | u64 offset; | ||
263 | }; | ||
264 | |||
265 | static int qxl_ttm_backend_bind(struct ttm_tt *ttm, | ||
266 | struct ttm_mem_reg *bo_mem) | ||
267 | { | ||
268 | struct qxl_ttm_tt *gtt = (void *)ttm; | ||
269 | |||
270 | gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); | ||
271 | if (!ttm->num_pages) { | ||
272 | WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", | ||
273 | ttm->num_pages, bo_mem, ttm); | ||
274 | } | ||
275 | /* Not implemented */ | ||
276 | return -1; | ||
277 | } | ||
278 | |||
279 | static int qxl_ttm_backend_unbind(struct ttm_tt *ttm) | ||
280 | { | ||
281 | /* Not implemented */ | ||
282 | return -1; | ||
283 | } | ||
284 | |||
285 | static void qxl_ttm_backend_destroy(struct ttm_tt *ttm) | ||
286 | { | ||
287 | struct qxl_ttm_tt *gtt = (void *)ttm; | ||
288 | |||
289 | ttm_dma_tt_fini(>t->ttm); | ||
290 | kfree(gtt); | ||
291 | } | ||
292 | |||
293 | static struct ttm_backend_func qxl_backend_func = { | ||
294 | .bind = &qxl_ttm_backend_bind, | ||
295 | .unbind = &qxl_ttm_backend_unbind, | ||
296 | .destroy = &qxl_ttm_backend_destroy, | ||
297 | }; | ||
298 | |||
299 | static int qxl_ttm_tt_populate(struct ttm_tt *ttm) | ||
300 | { | ||
301 | int r; | ||
302 | |||
303 | if (ttm->state != tt_unpopulated) | ||
304 | return 0; | ||
305 | |||
306 | r = ttm_pool_populate(ttm); | ||
307 | if (r) | ||
308 | return r; | ||
309 | |||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm) | ||
314 | { | ||
315 | ttm_pool_unpopulate(ttm); | ||
316 | } | ||
317 | |||
318 | struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev, | ||
319 | unsigned long size, uint32_t page_flags, | ||
320 | struct page *dummy_read_page) | ||
321 | { | ||
322 | struct qxl_device *qdev; | ||
323 | struct qxl_ttm_tt *gtt; | ||
324 | |||
325 | qdev = qxl_get_qdev(bdev); | ||
326 | gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL); | ||
327 | if (gtt == NULL) | ||
328 | return NULL; | ||
329 | gtt->ttm.ttm.func = &qxl_backend_func; | ||
330 | gtt->qdev = qdev; | ||
331 | if (ttm_dma_tt_init(>t->ttm, bdev, size, page_flags, | ||
332 | dummy_read_page)) { | ||
333 | kfree(gtt); | ||
334 | return NULL; | ||
335 | } | ||
336 | return >t->ttm.ttm; | ||
337 | } | ||
338 | |||
339 | static void qxl_move_null(struct ttm_buffer_object *bo, | ||
340 | struct ttm_mem_reg *new_mem) | ||
341 | { | ||
342 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
343 | |||
344 | BUG_ON(old_mem->mm_node != NULL); | ||
345 | *old_mem = *new_mem; | ||
346 | new_mem->mm_node = NULL; | ||
347 | } | ||
348 | |||
349 | static int qxl_bo_move(struct ttm_buffer_object *bo, | ||
350 | bool evict, bool interruptible, | ||
351 | bool no_wait_gpu, | ||
352 | struct ttm_mem_reg *new_mem) | ||
353 | { | ||
354 | struct ttm_mem_reg *old_mem = &bo->mem; | ||
355 | if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { | ||
356 | qxl_move_null(bo, new_mem); | ||
357 | return 0; | ||
358 | } | ||
359 | return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); | ||
360 | } | ||
361 | |||
362 | |||
363 | static int qxl_sync_obj_wait(void *sync_obj, | ||
364 | bool lazy, bool interruptible) | ||
365 | { | ||
366 | struct qxl_fence *qfence = (struct qxl_fence *)sync_obj; | ||
367 | int count = 0, sc = 0; | ||
368 | struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); | ||
369 | |||
370 | if (qfence->num_active_releases == 0) | ||
371 | return 0; | ||
372 | |||
373 | retry: | ||
374 | if (sc == 0) { | ||
375 | if (bo->type == QXL_GEM_DOMAIN_SURFACE) | ||
376 | qxl_update_surface(qfence->qdev, bo); | ||
377 | } else if (sc >= 1) { | ||
378 | qxl_io_notify_oom(qfence->qdev); | ||
379 | } | ||
380 | |||
381 | sc++; | ||
382 | |||
383 | for (count = 0; count < 10; count++) { | ||
384 | bool ret; | ||
385 | ret = qxl_queue_garbage_collect(qfence->qdev, true); | ||
386 | if (ret == false) | ||
387 | break; | ||
388 | |||
389 | if (qfence->num_active_releases == 0) | ||
390 | return 0; | ||
391 | } | ||
392 | |||
393 | if (qfence->num_active_releases) { | ||
394 | bool have_drawable_releases = false; | ||
395 | void **slot; | ||
396 | struct radix_tree_iter iter; | ||
397 | int release_id; | ||
398 | |||
399 | radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) { | ||
400 | struct qxl_release *release; | ||
401 | |||
402 | release_id = iter.index; | ||
403 | release = qxl_release_from_id_locked(qfence->qdev, release_id); | ||
404 | if (release == NULL) | ||
405 | continue; | ||
406 | |||
407 | if (release->type == QXL_RELEASE_DRAWABLE) | ||
408 | have_drawable_releases = true; | ||
409 | } | ||
410 | |||
411 | qxl_queue_garbage_collect(qfence->qdev, true); | ||
412 | |||
413 | if (have_drawable_releases || sc < 4) { | ||
414 | if (sc > 2) | ||
415 | /* back off */ | ||
416 | usleep_range(500, 1000); | ||
417 | if (have_drawable_releases && sc > 300) { | ||
418 | WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n", sc, bo->surface_id, bo->is_primary, bo->pin_count, (unsigned long)bo->gem_base.size, qfence->num_active_releases); | ||
419 | return -EBUSY; | ||
420 | } | ||
421 | goto retry; | ||
422 | } | ||
423 | } | ||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | static int qxl_sync_obj_flush(void *sync_obj) | ||
428 | { | ||
429 | return 0; | ||
430 | } | ||
431 | |||
432 | static void qxl_sync_obj_unref(void **sync_obj) | ||
433 | { | ||
434 | } | ||
435 | |||
436 | static void *qxl_sync_obj_ref(void *sync_obj) | ||
437 | { | ||
438 | return sync_obj; | ||
439 | } | ||
440 | |||
441 | static bool qxl_sync_obj_signaled(void *sync_obj) | ||
442 | { | ||
443 | struct qxl_fence *qfence = (struct qxl_fence *)sync_obj; | ||
444 | return (qfence->num_active_releases == 0); | ||
445 | } | ||
446 | |||
447 | static void qxl_bo_move_notify(struct ttm_buffer_object *bo, | ||
448 | struct ttm_mem_reg *new_mem) | ||
449 | { | ||
450 | struct qxl_bo *qbo; | ||
451 | struct qxl_device *qdev; | ||
452 | |||
453 | if (!qxl_ttm_bo_is_qxl_bo(bo)) | ||
454 | return; | ||
455 | qbo = container_of(bo, struct qxl_bo, tbo); | ||
456 | qdev = qbo->gem_base.dev->dev_private; | ||
457 | |||
458 | if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id) | ||
459 | qxl_surface_evict(qdev, qbo, new_mem ? true : false); | ||
460 | } | ||
461 | |||
462 | static struct ttm_bo_driver qxl_bo_driver = { | ||
463 | .ttm_tt_create = &qxl_ttm_tt_create, | ||
464 | .ttm_tt_populate = &qxl_ttm_tt_populate, | ||
465 | .ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate, | ||
466 | .invalidate_caches = &qxl_invalidate_caches, | ||
467 | .init_mem_type = &qxl_init_mem_type, | ||
468 | .evict_flags = &qxl_evict_flags, | ||
469 | .move = &qxl_bo_move, | ||
470 | .verify_access = &qxl_verify_access, | ||
471 | .io_mem_reserve = &qxl_ttm_io_mem_reserve, | ||
472 | .io_mem_free = &qxl_ttm_io_mem_free, | ||
473 | .sync_obj_signaled = &qxl_sync_obj_signaled, | ||
474 | .sync_obj_wait = &qxl_sync_obj_wait, | ||
475 | .sync_obj_flush = &qxl_sync_obj_flush, | ||
476 | .sync_obj_unref = &qxl_sync_obj_unref, | ||
477 | .sync_obj_ref = &qxl_sync_obj_ref, | ||
478 | .move_notify = &qxl_bo_move_notify, | ||
479 | }; | ||
480 | |||
481 | |||
482 | |||
483 | int qxl_ttm_init(struct qxl_device *qdev) | ||
484 | { | ||
485 | int r; | ||
486 | int num_io_pages; /* != rom->num_io_pages, we include surface0 */ | ||
487 | |||
488 | r = qxl_ttm_global_init(qdev); | ||
489 | if (r) | ||
490 | return r; | ||
491 | /* No others user of address space so set it to 0 */ | ||
492 | r = ttm_bo_device_init(&qdev->mman.bdev, | ||
493 | qdev->mman.bo_global_ref.ref.object, | ||
494 | &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0); | ||
495 | if (r) { | ||
496 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | ||
497 | return r; | ||
498 | } | ||
499 | /* NOTE: this includes the framebuffer (aka surface 0) */ | ||
500 | num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE; | ||
501 | r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM, | ||
502 | num_io_pages); | ||
503 | if (r) { | ||
504 | DRM_ERROR("Failed initializing VRAM heap.\n"); | ||
505 | return r; | ||
506 | } | ||
507 | r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0, | ||
508 | qdev->surfaceram_size / PAGE_SIZE); | ||
509 | if (r) { | ||
510 | DRM_ERROR("Failed initializing Surfaces heap.\n"); | ||
511 | return r; | ||
512 | } | ||
513 | DRM_INFO("qxl: %uM of VRAM memory size\n", | ||
514 | (unsigned)qdev->vram_size / (1024 * 1024)); | ||
515 | DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n", | ||
516 | ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024)); | ||
517 | if (unlikely(qdev->mman.bdev.dev_mapping == NULL)) | ||
518 | qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping; | ||
519 | r = qxl_ttm_debugfs_init(qdev); | ||
520 | if (r) { | ||
521 | DRM_ERROR("Failed to init debugfs\n"); | ||
522 | return r; | ||
523 | } | ||
524 | return 0; | ||
525 | } | ||
526 | |||
527 | void qxl_ttm_fini(struct qxl_device *qdev) | ||
528 | { | ||
529 | ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM); | ||
530 | ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0); | ||
531 | ttm_bo_device_release(&qdev->mman.bdev); | ||
532 | qxl_ttm_global_fini(qdev); | ||
533 | DRM_INFO("qxl: ttm finalized\n"); | ||
534 | } | ||
535 | |||
536 | |||
537 | #define QXL_DEBUGFS_MEM_TYPES 2 | ||
538 | |||
539 | #if defined(CONFIG_DEBUG_FS) | ||
540 | static int qxl_mm_dump_table(struct seq_file *m, void *data) | ||
541 | { | ||
542 | struct drm_info_node *node = (struct drm_info_node *)m->private; | ||
543 | struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; | ||
544 | struct drm_device *dev = node->minor->dev; | ||
545 | struct qxl_device *rdev = dev->dev_private; | ||
546 | int ret; | ||
547 | struct ttm_bo_global *glob = rdev->mman.bdev.glob; | ||
548 | |||
549 | spin_lock(&glob->lru_lock); | ||
550 | ret = drm_mm_dump_table(m, mm); | ||
551 | spin_unlock(&glob->lru_lock); | ||
552 | return ret; | ||
553 | } | ||
554 | #endif | ||
555 | |||
556 | static int qxl_ttm_debugfs_init(struct qxl_device *qdev) | ||
557 | { | ||
558 | static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES]; | ||
559 | static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32]; | ||
560 | unsigned i; | ||
561 | |||
562 | for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) { | ||
563 | if (i == 0) | ||
564 | sprintf(qxl_mem_types_names[i], "qxl_mem_mm"); | ||
565 | else | ||
566 | sprintf(qxl_mem_types_names[i], "qxl_surf_mm"); | ||
567 | qxl_mem_types_list[i].name = qxl_mem_types_names[i]; | ||
568 | qxl_mem_types_list[i].show = &qxl_mm_dump_table; | ||
569 | qxl_mem_types_list[i].driver_features = 0; | ||
570 | if (i == 0) | ||
571 | qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv; | ||
572 | else | ||
573 | qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv; | ||
574 | |||
575 | } | ||
576 | return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i); | ||
577 | } | ||
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild index ba99ce3f7372..a042a957296d 100644 --- a/include/uapi/drm/Kbuild +++ b/include/uapi/drm/Kbuild | |||
@@ -8,6 +8,7 @@ header-y += i810_drm.h | |||
8 | header-y += i915_drm.h | 8 | header-y += i915_drm.h |
9 | header-y += mga_drm.h | 9 | header-y += mga_drm.h |
10 | header-y += nouveau_drm.h | 10 | header-y += nouveau_drm.h |
11 | header-y += qxl_drm.h | ||
11 | header-y += r128_drm.h | 12 | header-y += r128_drm.h |
12 | header-y += radeon_drm.h | 13 | header-y += radeon_drm.h |
13 | header-y += savage_drm.h | 14 | header-y += savage_drm.h |
diff --git a/include/uapi/drm/qxl_drm.h b/include/uapi/drm/qxl_drm.h new file mode 100644 index 000000000000..ebebd36c4117 --- /dev/null +++ b/include/uapi/drm/qxl_drm.h | |||
@@ -0,0 +1,152 @@ | |||
1 | /* | ||
2 | * Copyright 2013 Red Hat | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the "Software"), | ||
7 | * to deal in the Software without restriction, including without limitation | ||
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
9 | * and/or sell copies of the Software, and to permit persons to whom the | ||
10 | * Software is furnished to do so, subject to the following conditions: | ||
11 | * | ||
12 | * The above copyright notice and this permission notice (including the next | ||
13 | * paragraph) shall be included in all copies or substantial portions of the | ||
14 | * Software. | ||
15 | * | ||
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
19 | * THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
22 | * OTHER DEALINGS IN THE SOFTWARE. | ||
23 | */ | ||
24 | #ifndef QXL_DRM_H | ||
25 | #define QXL_DRM_H | ||
26 | |||
27 | #include <stddef.h> | ||
28 | #include "drm/drm.h" | ||
29 | |||
30 | /* Please note that modifications to all structs defined here are | ||
31 | * subject to backwards-compatibility constraints. | ||
32 | * | ||
33 | * Do not use pointers, use uint64_t instead for 32 bit / 64 bit user/kernel | ||
34 | * compatibility Keep fields aligned to their size | ||
35 | */ | ||
36 | |||
37 | #define QXL_GEM_DOMAIN_CPU 0 | ||
38 | #define QXL_GEM_DOMAIN_VRAM 1 | ||
39 | #define QXL_GEM_DOMAIN_SURFACE 2 | ||
40 | |||
41 | #define DRM_QXL_ALLOC 0x00 | ||
42 | #define DRM_QXL_MAP 0x01 | ||
43 | #define DRM_QXL_EXECBUFFER 0x02 | ||
44 | #define DRM_QXL_UPDATE_AREA 0x03 | ||
45 | #define DRM_QXL_GETPARAM 0x04 | ||
46 | #define DRM_QXL_CLIENTCAP 0x05 | ||
47 | |||
48 | #define DRM_QXL_ALLOC_SURF 0x06 | ||
49 | |||
50 | struct drm_qxl_alloc { | ||
51 | uint32_t size; | ||
52 | uint32_t handle; /* 0 is an invalid handle */ | ||
53 | }; | ||
54 | |||
55 | struct drm_qxl_map { | ||
56 | uint64_t offset; /* use for mmap system call */ | ||
57 | uint32_t handle; | ||
58 | uint32_t pad; | ||
59 | }; | ||
60 | |||
61 | /* | ||
62 | * dest is the bo we are writing the relocation into | ||
63 | * src is bo we are relocating. | ||
64 | * *(dest_handle.base_addr + dest_offset) = physical_address(src_handle.addr + | ||
65 | * src_offset) | ||
66 | */ | ||
67 | #define QXL_RELOC_TYPE_BO 1 | ||
68 | #define QXL_RELOC_TYPE_SURF 2 | ||
69 | |||
70 | struct drm_qxl_reloc { | ||
71 | uint64_t src_offset; /* offset into src_handle or src buffer */ | ||
72 | uint64_t dst_offset; /* offset in dest handle */ | ||
73 | uint32_t src_handle; /* dest handle to compute address from */ | ||
74 | uint32_t dst_handle; /* 0 if to command buffer */ | ||
75 | uint32_t reloc_type; | ||
76 | uint32_t pad; | ||
77 | }; | ||
78 | |||
79 | struct drm_qxl_command { | ||
80 | uint64_t __user command; /* void* */ | ||
81 | uint64_t __user relocs; /* struct drm_qxl_reloc* */ | ||
82 | uint32_t type; | ||
83 | uint32_t command_size; | ||
84 | uint32_t relocs_num; | ||
85 | uint32_t pad; | ||
86 | }; | ||
87 | |||
88 | /* XXX: call it drm_qxl_commands? */ | ||
89 | struct drm_qxl_execbuffer { | ||
90 | uint32_t flags; /* for future use */ | ||
91 | uint32_t commands_num; | ||
92 | uint64_t __user commands; /* struct drm_qxl_command* */ | ||
93 | }; | ||
94 | |||
95 | struct drm_qxl_update_area { | ||
96 | uint32_t handle; | ||
97 | uint32_t top; | ||
98 | uint32_t left; | ||
99 | uint32_t bottom; | ||
100 | uint32_t right; | ||
101 | uint32_t pad; | ||
102 | }; | ||
103 | |||
104 | #define QXL_PARAM_NUM_SURFACES 1 /* rom->n_surfaces */ | ||
105 | #define QXL_PARAM_MAX_RELOCS 2 | ||
106 | struct drm_qxl_getparam { | ||
107 | uint64_t param; | ||
108 | uint64_t value; | ||
109 | }; | ||
110 | |||
111 | /* these are one bit values */ | ||
112 | struct drm_qxl_clientcap { | ||
113 | uint32_t index; | ||
114 | uint32_t pad; | ||
115 | }; | ||
116 | |||
117 | struct drm_qxl_alloc_surf { | ||
118 | uint32_t format; | ||
119 | uint32_t width; | ||
120 | uint32_t height; | ||
121 | int32_t stride; | ||
122 | uint32_t handle; | ||
123 | uint32_t pad; | ||
124 | }; | ||
125 | |||
126 | #define DRM_IOCTL_QXL_ALLOC \ | ||
127 | DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC, struct drm_qxl_alloc) | ||
128 | |||
129 | #define DRM_IOCTL_QXL_MAP \ | ||
130 | DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_MAP, struct drm_qxl_map) | ||
131 | |||
132 | #define DRM_IOCTL_QXL_EXECBUFFER \ | ||
133 | DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_EXECBUFFER,\ | ||
134 | struct drm_qxl_execbuffer) | ||
135 | |||
136 | #define DRM_IOCTL_QXL_UPDATE_AREA \ | ||
137 | DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_UPDATE_AREA,\ | ||
138 | struct drm_qxl_update_area) | ||
139 | |||
140 | #define DRM_IOCTL_QXL_GETPARAM \ | ||
141 | DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_GETPARAM,\ | ||
142 | struct drm_qxl_getparam) | ||
143 | |||
144 | #define DRM_IOCTL_QXL_CLIENTCAP \ | ||
145 | DRM_IOW(DRM_COMMAND_BASE + DRM_QXL_CLIENTCAP,\ | ||
146 | struct drm_qxl_clientcap) | ||
147 | |||
148 | #define DRM_IOCTL_QXL_ALLOC_SURF \ | ||
149 | DRM_IOWR(DRM_COMMAND_BASE + DRM_QXL_ALLOC_SURF,\ | ||
150 | struct drm_qxl_alloc_surf) | ||
151 | |||
152 | #endif | ||