Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_kms.c')
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 880
1 files changed, 880 insertions, 0 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
new file mode 100644
index 000000000000..31f9afed0a63
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -0,0 +1,880 @@
1 | /************************************************************************** | ||
2 | * | ||
3 | * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA | ||
4 | * All Rights Reserved. | ||
5 | * | ||
6 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
7 | * copy of this software and associated documentation files (the | ||
8 | * "Software"), to deal in the Software without restriction, including | ||
9 | * without limitation the rights to use, copy, modify, merge, publish, | ||
10 | * distribute, sub license, and/or sell copies of the Software, and to | ||
11 | * permit persons to whom the Software is furnished to do so, subject to | ||
12 | * the following conditions: | ||
13 | * | ||
14 | * The above copyright notice and this permission notice (including the | ||
15 | * next paragraph) shall be included in all copies or substantial portions | ||
16 | * of the Software. | ||
17 | * | ||
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
21 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
22 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
23 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
24 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
25 | * | ||
26 | **************************************************************************/ | ||
27 | |||
28 | #include "vmwgfx_kms.h" | ||
29 | |||
30 | /* Might need an hrtimer here? */ | ||
31 | #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) | ||
32 | |||
33 | |||
34 | void vmw_display_unit_cleanup(struct vmw_display_unit *du) | ||
35 | { | ||
36 | if (du->cursor_surface) | ||
37 | vmw_surface_unreference(&du->cursor_surface); | ||
38 | if (du->cursor_dmabuf) | ||
39 | vmw_dmabuf_unreference(&du->cursor_dmabuf); | ||
40 | drm_crtc_cleanup(&du->crtc); | ||
41 | drm_encoder_cleanup(&du->encoder); | ||
42 | drm_connector_cleanup(&du->connector); | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * Display Unit Cursor functions | ||
47 | */ | ||
48 | |||
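/*
 * Define the host cursor image: reserve FIFO space for an
 * SVGA_CMD_DEFINE_ALPHA_CURSOR command followed by the 32-bit ARGB image
 * data, fill in the header and commit it in one go.
 */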
49 | int vmw_cursor_update_image(struct vmw_private *dev_priv, | ||
50 | u32 *image, u32 width, u32 height, | ||
51 | u32 hotspotX, u32 hotspotY) | ||
52 | { | ||
53 | struct { | ||
54 | u32 cmd; | ||
55 | SVGAFifoCmdDefineAlphaCursor cursor; | ||
56 | } *cmd; | ||
57 | u32 image_size = width * height * 4; | ||
58 | u32 cmd_size = sizeof(*cmd) + image_size; | ||
59 | |||
60 | if (!image) | ||
61 | return -EINVAL; | ||
62 | |||
63 | cmd = vmw_fifo_reserve(dev_priv, cmd_size); | ||
64 | if (unlikely(cmd == NULL)) { | ||
65 | DRM_ERROR("Fifo reserve failed.\n"); | ||
66 | return -ENOMEM; | ||
67 | } | ||
68 | |||
69 | memset(cmd, 0, sizeof(*cmd)); | ||
70 | |||
71 | memcpy(&cmd[1], image, image_size); | ||
72 | |||
73 | cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR); | ||
74 | cmd->cursor.id = cpu_to_le32(0); | ||
75 | cmd->cursor.width = cpu_to_le32(width); | ||
76 | cmd->cursor.height = cpu_to_le32(height); | ||
77 | cmd->cursor.hotspotX = cpu_to_le32(hotspotX); | ||
78 | cmd->cursor.hotspotY = cpu_to_le32(hotspotY); | ||
79 | |||
80 | vmw_fifo_commit(dev_priv, cmd_size); | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | void vmw_cursor_update_position(struct vmw_private *dev_priv, | ||
86 | bool show, int x, int y) | ||
87 | { | ||
88 | __le32 __iomem *fifo_mem = dev_priv->mmio_virt; | ||
89 | uint32_t count; | ||
90 | |||
91 | iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON); | ||
92 | iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X); | ||
93 | iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y); | ||
94 | count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT); | ||
95 | iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); | ||
96 | } | ||
97 | |||
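/*
 * DRM cursor_set entry point. The handle may name either a surface with a
 * snooped cursor image or a dma buffer holding a 64x64 ARGB image; a zero
 * handle simply hides the cursor.
 */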
98 | int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | ||
99 | uint32_t handle, uint32_t width, uint32_t height) | ||
100 | { | ||
101 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | ||
102 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
103 | struct vmw_display_unit *du = vmw_crtc_to_du(crtc); | ||
104 | struct vmw_surface *surface = NULL; | ||
105 | struct vmw_dma_buffer *dmabuf = NULL; | ||
106 | int ret; | ||
107 | |||
108 | if (handle) { | ||
109 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, | ||
110 | handle, &surface); | ||
111 | if (!ret) { | ||
112 | if (!surface->snooper.image) { | ||
113 | DRM_ERROR("surface not suitable for cursor\n"); | ||
114 | return -EINVAL; | ||
115 | } | ||
116 | } else { | ||
117 | ret = vmw_user_dmabuf_lookup(tfile, | ||
118 | handle, &dmabuf); | ||
119 | if (ret) { | ||
120 | DRM_ERROR("failed to find surface or dmabuf: %i\n", ret); | ||
121 | return -EINVAL; | ||
122 | } | ||
123 | } | ||
124 | } | ||
125 | |||
126 | /* takedown old cursor */ | ||
127 | if (du->cursor_surface) { | ||
128 | du->cursor_surface->snooper.crtc = NULL; | ||
129 | vmw_surface_unreference(&du->cursor_surface); | ||
130 | } | ||
131 | if (du->cursor_dmabuf) | ||
132 | vmw_dmabuf_unreference(&du->cursor_dmabuf); | ||
133 | |||
134 | /* setup new image */ | ||
135 | if (surface) { | ||
136 | /* vmw_user_surface_lookup takes one reference */ | ||
137 | du->cursor_surface = surface; | ||
138 | |||
139 | du->cursor_surface->snooper.crtc = crtc; | ||
140 | du->cursor_age = du->cursor_surface->snooper.age; | ||
141 | vmw_cursor_update_image(dev_priv, surface->snooper.image, | ||
142 | 64, 64, du->hotspot_x, du->hotspot_y); | ||
143 | } else if (dmabuf) { | ||
144 | struct ttm_bo_kmap_obj map; | ||
145 | unsigned long kmap_offset; | ||
146 | unsigned long kmap_num; | ||
147 | void *virtual; | ||
148 | bool dummy; | ||
149 | |||
150 | /* vmw_user_dmabuf_lookup takes one reference */ | ||
151 | du->cursor_dmabuf = dmabuf; | ||
152 | |||
153 | kmap_offset = 0; | ||
154 | kmap_num = (64*64*4) >> PAGE_SHIFT; | ||
155 | |||
156 | ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0); | ||
157 | if (unlikely(ret != 0)) { | ||
158 | DRM_ERROR("reserve failed\n"); | ||
159 | return -EINVAL; | ||
160 | } | ||
161 | |||
162 | ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map); | ||
163 | if (unlikely(ret != 0)) | ||
164 | goto err_unreserve; | ||
165 | |||
166 | virtual = ttm_kmap_obj_virtual(&map, &dummy); | ||
167 | vmw_cursor_update_image(dev_priv, virtual, 64, 64, | ||
168 | du->hotspot_x, du->hotspot_y); | ||
169 | |||
170 | ttm_bo_kunmap(&map); | ||
171 | err_unreserve: | ||
172 | ttm_bo_unreserve(&dmabuf->base); | ||
173 | |||
174 | } else { | ||
175 | vmw_cursor_update_position(dev_priv, false, 0, 0); | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y); | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | ||
185 | { | ||
186 | struct vmw_private *dev_priv = vmw_priv(crtc->dev); | ||
187 | struct vmw_display_unit *du = vmw_crtc_to_du(crtc); | ||
188 | bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false; | ||
189 | |||
190 | du->cursor_x = x + crtc->x; | ||
191 | du->cursor_y = y + crtc->y; | ||
192 | |||
193 | vmw_cursor_update_position(dev_priv, shown, | ||
194 | du->cursor_x, du->cursor_y); | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
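/*
 * Snoop cursor image data out of a SURFACE_DMA upload: copy the 64x64
 * image into the surface's snooper buffer and bump its age so the cursor
 * can be refreshed after command submission (see
 * vmw_kms_cursor_post_execbuf()).
 */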
199 | void vmw_kms_cursor_snoop(struct vmw_surface *srf, | ||
200 | struct ttm_object_file *tfile, | ||
201 | struct ttm_buffer_object *bo, | ||
202 | SVGA3dCmdHeader *header) | ||
203 | { | ||
204 | struct ttm_bo_kmap_obj map; | ||
205 | unsigned long kmap_offset; | ||
206 | unsigned long kmap_num; | ||
207 | SVGA3dCopyBox *box; | ||
208 | unsigned box_count; | ||
209 | void *virtual; | ||
210 | bool dummy; | ||
211 | struct vmw_dma_cmd { | ||
212 | SVGA3dCmdHeader header; | ||
213 | SVGA3dCmdSurfaceDMA dma; | ||
214 | } *cmd; | ||
215 | int ret; | ||
216 | |||
217 | cmd = container_of(header, struct vmw_dma_cmd, header); | ||
218 | |||
219 | /* No snooper installed */ | ||
220 | if (!srf->snooper.image) | ||
221 | return; | ||
222 | |||
223 | if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) { | ||
224 | DRM_ERROR("face and mipmap for cursors should never != 0\n"); | ||
225 | return; | ||
226 | } | ||
227 | |||
228 | if (cmd->header.size < 64) { | ||
229 | DRM_ERROR("at least one full copy box must be given\n"); | ||
230 | return; | ||
231 | } | ||
232 | |||
233 | box = (SVGA3dCopyBox *)&cmd[1]; | ||
234 | box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / | ||
235 | sizeof(SVGA3dCopyBox); | ||
236 | |||
237 | if (cmd->dma.guest.pitch != (64 * 4) || | ||
238 | cmd->dma.guest.ptr.offset % PAGE_SIZE || | ||
239 | box->x != 0 || box->y != 0 || box->z != 0 || | ||
240 | box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || | ||
241 | box->w != 64 || box->h != 64 || box->d != 1 || | ||
242 | box_count != 1) { | ||
243 | /* TODO handle non-page-aligned offsets */ | ||
244 | /* TODO handle partial uploads and pitch != 256 */ | ||
245 | /* TODO handle more than one copy (size != 64) */ | ||
246 | DRM_ERROR("lazy programmer, can't handle weird stuff\n"); | ||
247 | return; | ||
248 | } | ||
249 | |||
250 | kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; | ||
251 | kmap_num = (64*64*4) >> PAGE_SHIFT; | ||
252 | |||
253 | ret = ttm_bo_reserve(bo, true, false, false, 0); | ||
254 | if (unlikely(ret != 0)) { | ||
255 | DRM_ERROR("reserve failed\n"); | ||
256 | return; | ||
257 | } | ||
258 | |||
259 | ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); | ||
260 | if (unlikely(ret != 0)) | ||
261 | goto err_unreserve; | ||
262 | |||
263 | virtual = ttm_kmap_obj_virtual(&map, &dummy); | ||
264 | |||
265 | memcpy(srf->snooper.image, virtual, 64*64*4); | ||
266 | srf->snooper.age++; | ||
267 | |||
268 | /* we can't call the cursor update function from here since execbuf has | ||
269 | * reserved fifo space. | ||
270 | * | ||
271 | * if (srf->snooper.crtc) | ||
272 | * vmw_ldu_crtc_cursor_update_image(dev_priv, | ||
273 | * srf->snooper.image, 64, 64, | ||
274 | * du->hotspot_x, du->hotspot_y); | ||
275 | */ | ||
276 | |||
277 | ttm_bo_kunmap(&map); | ||
278 | err_unreserve: | ||
279 | ttm_bo_unreserve(bo); | ||
280 | } | ||
281 | |||
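/*
 * After command submission, push any cursor image that was snooped while
 * the FIFO was reserved out to the device for each affected CRTC.
 */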
282 | void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) | ||
283 | { | ||
284 | struct drm_device *dev = dev_priv->dev; | ||
285 | struct vmw_display_unit *du; | ||
286 | struct drm_crtc *crtc; | ||
287 | |||
288 | mutex_lock(&dev->mode_config.mutex); | ||
289 | |||
290 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
291 | du = vmw_crtc_to_du(crtc); | ||
292 | if (!du->cursor_surface || | ||
293 | du->cursor_age == du->cursor_surface->snooper.age) | ||
294 | continue; | ||
295 | |||
296 | du->cursor_age = du->cursor_surface->snooper.age; | ||
297 | vmw_cursor_update_image(dev_priv, | ||
298 | du->cursor_surface->snooper.image, | ||
299 | 64, 64, du->hotspot_x, du->hotspot_y); | ||
300 | } | ||
301 | |||
302 | mutex_unlock(&dev->mode_config.mutex); | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * Generic framebuffer code | ||
307 | */ | ||
308 | |||
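/*
 * vmw framebuffers have no buffer handle to hand back; just report a zero
 * handle and success.
 */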
309 | int vmw_framebuffer_create_handle(struct drm_framebuffer *fb, | ||
310 | struct drm_file *file_priv, | ||
311 | unsigned int *handle) | ||
312 | { | ||
313 | if (handle) | ||
314 | *handle = 0; | ||
315 | |||
316 | return 0; | ||
317 | } | ||
318 | |||
319 | /* | ||
320 | * Surface framebuffer code | ||
321 | */ | ||
322 | |||
323 | #define vmw_framebuffer_to_vfbs(x) \ | ||
324 | container_of(x, struct vmw_framebuffer_surface, base.base) | ||
325 | |||
326 | struct vmw_framebuffer_surface { | ||
327 | struct vmw_framebuffer base; | ||
328 | struct vmw_surface *surface; | ||
329 | struct delayed_work d_work; | ||
330 | struct mutex work_lock; | ||
331 | bool present_fs; | ||
332 | }; | ||
333 | |||
334 | void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) | ||
335 | { | ||
336 | struct vmw_framebuffer_surface *vfb = | ||
337 | vmw_framebuffer_to_vfbs(framebuffer); | ||
338 | |||
339 | cancel_delayed_work_sync(&vfb->d_work); | ||
340 | drm_framebuffer_cleanup(framebuffer); | ||
341 | vmw_surface_unreference(&vfb->surface); | ||
342 | |||
343 | kfree(framebuffer); | ||
344 | } | ||
345 | |||
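/*
 * Delayed-work handler that emits a full-framebuffer SVGA_3D_CMD_PRESENT
 * for the surface. If FIFO space cannot be reserved, the work is
 * rescheduled after VMWGFX_PRESENT_RATE rather than dropped.
 */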
346 | static void vmw_framebuffer_present_fs_callback(struct work_struct *work) | ||
347 | { | ||
348 | struct delayed_work *d_work = | ||
349 | container_of(work, struct delayed_work, work); | ||
350 | struct vmw_framebuffer_surface *vfbs = | ||
351 | container_of(d_work, struct vmw_framebuffer_surface, d_work); | ||
352 | struct vmw_surface *surf = vfbs->surface; | ||
353 | struct drm_framebuffer *framebuffer = &vfbs->base.base; | ||
354 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | ||
355 | |||
356 | struct { | ||
357 | SVGA3dCmdHeader header; | ||
358 | SVGA3dCmdPresent body; | ||
359 | SVGA3dCopyRect cr; | ||
360 | } *cmd; | ||
361 | |||
362 | mutex_lock(&vfbs->work_lock); | ||
363 | if (!vfbs->present_fs) | ||
364 | goto out_unlock; | ||
365 | |||
366 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); | ||
367 | if (unlikely(cmd == NULL)) | ||
368 | goto out_resched; | ||
369 | |||
370 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT); | ||
371 | cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr)); | ||
372 | cmd->body.sid = cpu_to_le32(surf->res.id); | ||
373 | cmd->cr.x = cpu_to_le32(0); | ||
374 | cmd->cr.y = cpu_to_le32(0); | ||
375 | cmd->cr.srcx = cmd->cr.x; | ||
376 | cmd->cr.srcy = cmd->cr.y; | ||
377 | cmd->cr.w = cpu_to_le32(framebuffer->width); | ||
378 | cmd->cr.h = cpu_to_le32(framebuffer->height); | ||
379 | vfbs->present_fs = false; | ||
380 | vmw_fifo_commit(dev_priv, sizeof(*cmd)); | ||
381 | out_resched: | ||
382 | /** | ||
383 | * Will not re-add if already pending. | ||
384 | */ | ||
385 | schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE); | ||
386 | out_unlock: | ||
387 | mutex_unlock(&vfbs->work_lock); | ||
388 | } | ||
389 | |||
390 | |||
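/*
 * Dirty callback for surface-backed framebuffers. Without screen-object
 * support (or without clip rects) the present is routed through the
 * delayed-work path above; otherwise an SVGA_3D_CMD_PRESENT with one copy
 * rect per clip rect is emitted directly.
 */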
391 | int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer, | ||
392 | unsigned flags, unsigned color, | ||
393 | struct drm_clip_rect *clips, | ||
394 | unsigned num_clips) | ||
395 | { | ||
396 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | ||
397 | struct vmw_framebuffer_surface *vfbs = | ||
398 | vmw_framebuffer_to_vfbs(framebuffer); | ||
399 | struct vmw_surface *surf = vfbs->surface; | ||
400 | struct drm_clip_rect norect; | ||
401 | SVGA3dCopyRect *cr; | ||
402 | int i, inc = 1; | ||
403 | |||
404 | struct { | ||
405 | SVGA3dCmdHeader header; | ||
406 | SVGA3dCmdPresent body; | ||
407 | SVGA3dCopyRect cr; | ||
408 | } *cmd; | ||
409 | |||
410 | if (!num_clips || | ||
411 | !(dev_priv->fifo.capabilities & | ||
412 | SVGA_FIFO_CAP_SCREEN_OBJECT)) { | ||
413 | int ret; | ||
414 | |||
415 | mutex_lock(&vfbs->work_lock); | ||
416 | vfbs->present_fs = true; | ||
417 | ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE); | ||
418 | mutex_unlock(&vfbs->work_lock); | ||
419 | if (ret) { | ||
420 | /** | ||
421 | * No work pending; force an immediate present. | ||
422 | */ | ||
423 | vmw_framebuffer_present_fs_callback(&vfbs->d_work.work); | ||
424 | } | ||
425 | return 0; | ||
426 | } | ||
427 | |||
428 | if (!num_clips) { | ||
429 | num_clips = 1; | ||
430 | clips = &norect; | ||
431 | norect.x1 = norect.y1 = 0; | ||
432 | norect.x2 = framebuffer->width; | ||
433 | norect.y2 = framebuffer->height; | ||
434 | } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { | ||
435 | num_clips /= 2; | ||
436 | inc = 2; /* skip source rects */ | ||
437 | } | ||
438 | |||
439 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); | ||
440 | if (unlikely(cmd == NULL)) { | ||
441 | DRM_ERROR("Fifo reserve failed.\n"); | ||
442 | return -ENOMEM; | ||
443 | } | ||
444 | |||
445 | memset(cmd, 0, sizeof(*cmd)); | ||
446 | |||
447 | cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT); | ||
448 | cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr)); | ||
449 | cmd->body.sid = cpu_to_le32(surf->res.id); | ||
450 | |||
451 | for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) { | ||
452 | cr->x = cpu_to_le16(clips->x1); | ||
453 | cr->y = cpu_to_le16(clips->y1); | ||
454 | cr->srcx = cr->x; | ||
455 | cr->srcy = cr->y; | ||
456 | cr->w = cpu_to_le16(clips->x2 - clips->x1); | ||
457 | cr->h = cpu_to_le16(clips->y2 - clips->y1); | ||
458 | } | ||
459 | |||
460 | vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr)); | ||
461 | |||
462 | return 0; | ||
463 | } | ||
464 | |||
465 | static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { | ||
466 | .destroy = vmw_framebuffer_surface_destroy, | ||
467 | .dirty = vmw_framebuffer_surface_dirty, | ||
468 | .create_handle = vmw_framebuffer_create_handle, | ||
469 | }; | ||
470 | |||
471 | int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, | ||
472 | struct vmw_surface *surface, | ||
473 | struct vmw_framebuffer **out, | ||
474 | unsigned width, unsigned height) | ||
475 | |||
476 | { | ||
477 | struct drm_device *dev = dev_priv->dev; | ||
478 | struct vmw_framebuffer_surface *vfbs; | ||
479 | int ret; | ||
480 | |||
481 | vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL); | ||
482 | if (!vfbs) { | ||
483 | ret = -ENOMEM; | ||
484 | goto out_err1; | ||
485 | } | ||
486 | |||
487 | ret = drm_framebuffer_init(dev, &vfbs->base.base, | ||
488 | &vmw_framebuffer_surface_funcs); | ||
489 | if (ret) | ||
490 | goto out_err2; | ||
491 | |||
492 | if (!vmw_surface_reference(surface)) { | ||
493 | DRM_ERROR("failed to reference surface %p\n", surface); | ||
494 | goto out_err3; | ||
495 | } | ||
496 | |||
497 | /* XXX get the first 3 from the surface info */ | ||
498 | vfbs->base.base.bits_per_pixel = 32; | ||
499 | vfbs->base.base.pitch = width * 32 / 4; | ||
500 | vfbs->base.base.depth = 24; | ||
501 | vfbs->base.base.width = width; | ||
502 | vfbs->base.base.height = height; | ||
503 | vfbs->base.pin = NULL; | ||
504 | vfbs->base.unpin = NULL; | ||
505 | vfbs->surface = surface; | ||
506 | mutex_init(&vfbs->work_lock); | ||
507 | INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback); | ||
508 | *out = &vfbs->base; | ||
509 | |||
510 | return 0; | ||
511 | |||
512 | out_err3: | ||
513 | drm_framebuffer_cleanup(&vfbs->base.base); | ||
514 | out_err2: | ||
515 | kfree(vfbs); | ||
516 | out_err1: | ||
517 | return ret; | ||
518 | } | ||
519 | |||
520 | /* | ||
521 | * Dmabuf framebuffer code | ||
522 | */ | ||
523 | |||
524 | #define vmw_framebuffer_to_vfbd(x) \ | ||
525 | container_of(x, struct vmw_framebuffer_dmabuf, base.base) | ||
526 | |||
527 | struct vmw_framebuffer_dmabuf { | ||
528 | struct vmw_framebuffer base; | ||
529 | struct vmw_dma_buffer *buffer; | ||
530 | }; | ||
531 | |||
532 | void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer) | ||
533 | { | ||
534 | struct vmw_framebuffer_dmabuf *vfbd = | ||
535 | vmw_framebuffer_to_vfbd(framebuffer); | ||
536 | |||
537 | drm_framebuffer_cleanup(framebuffer); | ||
538 | vmw_dmabuf_unreference(&vfbd->buffer); | ||
539 | |||
540 | kfree(vfbd); | ||
541 | } | ||
542 | |||
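/*
 * Dirty callback for dmabuf-backed framebuffers: emit one SVGA_CMD_UPDATE
 * per clip rect, or a single update covering the whole framebuffer when no
 * clip rects are passed in.
 */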
543 | int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer, | ||
544 | unsigned flags, unsigned color, | ||
545 | struct drm_clip_rect *clips, | ||
546 | unsigned num_clips) | ||
547 | { | ||
548 | struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); | ||
549 | struct drm_clip_rect norect; | ||
550 | struct { | ||
551 | uint32_t header; | ||
552 | SVGAFifoCmdUpdate body; | ||
553 | } *cmd; | ||
554 | int i, increment = 1; | ||
555 | |||
556 | if (!num_clips) { | ||
557 | num_clips = 1; | ||
558 | clips = &norect; | ||
559 | norect.x1 = norect.y1 = 0; | ||
560 | norect.x2 = framebuffer->width; | ||
561 | norect.y2 = framebuffer->height; | ||
562 | } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { | ||
563 | num_clips /= 2; | ||
564 | increment = 2; | ||
565 | } | ||
566 | |||
567 | cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips); | ||
568 | if (unlikely(cmd == NULL)) { | ||
569 | DRM_ERROR("Fifo reserve failed.\n"); | ||
570 | return -ENOMEM; | ||
571 | } | ||
572 | |||
573 | for (i = 0; i < num_clips; i++, clips += increment) { | ||
574 | cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE); | ||
575 | cmd[i].body.x = cpu_to_le32(clips->x1); | ||
576 | cmd[i].body.y = cpu_to_le32(clips->y1); | ||
577 | cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1); | ||
578 | cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1); | ||
579 | } | ||
580 | |||
581 | vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips); | ||
582 | |||
583 | return 0; | ||
584 | } | ||
585 | |||
586 | static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { | ||
587 | .destroy = vmw_framebuffer_dmabuf_destroy, | ||
588 | .dirty = vmw_framebuffer_dmabuf_dirty, | ||
589 | .create_handle = vmw_framebuffer_create_handle, | ||
590 | }; | ||
591 | |||
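/*
 * Pin the framebuffer's buffer at the start of VRAM and program the legacy
 * SVGA registers for scanout, pausing overlays across the move.
 */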
592 | static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb) | ||
593 | { | ||
594 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | ||
595 | struct vmw_framebuffer_dmabuf *vfbd = | ||
596 | vmw_framebuffer_to_vfbd(&vfb->base); | ||
597 | int ret; | ||
598 | |||
599 | vmw_overlay_pause_all(dev_priv); | ||
600 | |||
601 | ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer); | ||
602 | |||
603 | if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { | ||
604 | vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | ||
605 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0); | ||
606 | vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | ||
607 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
608 | vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
609 | vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
610 | vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
611 | vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
612 | |||
613 | vmw_write(dev_priv, SVGA_REG_ENABLE, 1); | ||
614 | vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width); | ||
615 | vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height); | ||
616 | vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel); | ||
617 | vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth); | ||
618 | vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000); | ||
619 | vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00); | ||
620 | vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff); | ||
621 | } else | ||
622 | WARN_ON(true); | ||
623 | |||
624 | vmw_overlay_resume_all(dev_priv); | ||
625 | |||
626 | return 0; | ||
627 | } | ||
628 | |||
629 | static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb) | ||
630 | { | ||
631 | struct vmw_private *dev_priv = vmw_priv(vfb->base.dev); | ||
632 | struct vmw_framebuffer_dmabuf *vfbd = | ||
633 | vmw_framebuffer_to_vfbd(&vfb->base); | ||
634 | |||
635 | if (!vfbd->buffer) { | ||
636 | WARN_ON(!vfbd->buffer); | ||
637 | return 0; | ||
638 | } | ||
639 | |||
640 | return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer); | ||
641 | } | ||
642 | |||
643 | int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, | ||
644 | struct vmw_dma_buffer *dmabuf, | ||
645 | struct vmw_framebuffer **out, | ||
646 | unsigned width, unsigned height) | ||
647 | |||
648 | { | ||
649 | struct drm_device *dev = dev_priv->dev; | ||
650 | struct vmw_framebuffer_dmabuf *vfbd; | ||
651 | int ret; | ||
652 | |||
653 | vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL); | ||
654 | if (!vfbd) { | ||
655 | ret = -ENOMEM; | ||
656 | goto out_err1; | ||
657 | } | ||
658 | |||
659 | ret = drm_framebuffer_init(dev, &vfbd->base.base, | ||
660 | &vmw_framebuffer_dmabuf_funcs); | ||
661 | if (ret) | ||
662 | goto out_err2; | ||
663 | |||
664 | if (!vmw_dmabuf_reference(dmabuf)) { | ||
665 | DRM_ERROR("failed to reference dmabuf %p\n", dmabuf); | ||
666 | goto out_err3; | ||
667 | } | ||
668 | |||
669 | /* XXX get the first 3 from the surface info */ | ||
670 | vfbd->base.base.bits_per_pixel = 32; | ||
671 | vfbd->base.base.pitch = width * 32 / 4; | ||
672 | vfbd->base.base.depth = 24; | ||
673 | vfbd->base.base.width = width; | ||
674 | vfbd->base.base.height = height; | ||
675 | vfbd->base.pin = vmw_framebuffer_dmabuf_pin; | ||
676 | vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin; | ||
677 | vfbd->buffer = dmabuf; | ||
678 | *out = &vfbd->base; | ||
679 | |||
680 | return 0; | ||
681 | |||
682 | out_err3: | ||
683 | drm_framebuffer_cleanup(&vfbd->base.base); | ||
684 | out_err2: | ||
685 | kfree(vfbd); | ||
686 | out_err1: | ||
687 | return ret; | ||
688 | } | ||
689 | |||
690 | /* | ||
691 | * Generic Kernel modesetting functions | ||
692 | */ | ||
693 | |||
694 | static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | ||
695 | struct drm_file *file_priv, | ||
696 | struct drm_mode_fb_cmd *mode_cmd) | ||
697 | { | ||
698 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
699 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | ||
700 | struct vmw_framebuffer *vfb = NULL; | ||
701 | struct vmw_surface *surface = NULL; | ||
702 | struct vmw_dma_buffer *bo = NULL; | ||
703 | int ret; | ||
704 | |||
705 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, | ||
706 | mode_cmd->handle, &surface); | ||
707 | if (ret) | ||
708 | goto try_dmabuf; | ||
709 | |||
710 | if (!surface->scanout) | ||
711 | goto err_not_scanout; | ||
712 | |||
713 | ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb, | ||
714 | mode_cmd->width, mode_cmd->height); | ||
715 | |||
716 | /* vmw_user_surface_lookup takes one ref, and so does new_fb */ | ||
717 | vmw_surface_unreference(&surface); | ||
718 | |||
719 | if (ret) { | ||
720 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); | ||
721 | return NULL; | ||
722 | } | ||
723 | return &vfb->base; | ||
724 | |||
725 | try_dmabuf: | ||
726 | DRM_INFO("%s: trying buffer\n", __func__); | ||
727 | |||
728 | ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo); | ||
729 | if (ret) { | ||
730 | DRM_ERROR("failed to find buffer: %i\n", ret); | ||
731 | return NULL; | ||
732 | } | ||
733 | |||
734 | ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb, | ||
735 | mode_cmd->width, mode_cmd->height); | ||
736 | |||
737 | /* vmw_user_dmabuf_lookup takes one ref, and so does new_fb */ | ||
738 | vmw_dmabuf_unreference(&bo); | ||
739 | |||
740 | if (ret) { | ||
741 | DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret); | ||
742 | return NULL; | ||
743 | } | ||
744 | |||
745 | return &vfb->base; | ||
746 | |||
747 | err_not_scanout: | ||
748 | DRM_ERROR("surface not marked as scanout\n"); | ||
749 | /* vmw_user_surface_lookup takes one ref */ | ||
750 | vmw_surface_unreference(&surface); | ||
751 | |||
752 | return NULL; | ||
753 | } | ||
754 | |||
755 | static int vmw_kms_fb_changed(struct drm_device *dev) | ||
756 | { | ||
757 | return 0; | ||
758 | } | ||
759 | |||
760 | static struct drm_mode_config_funcs vmw_kms_funcs = { | ||
761 | .fb_create = vmw_kms_fb_create, | ||
762 | .fb_changed = vmw_kms_fb_changed, | ||
763 | }; | ||
764 | |||
765 | int vmw_kms_init(struct vmw_private *dev_priv) | ||
766 | { | ||
767 | struct drm_device *dev = dev_priv->dev; | ||
768 | int ret; | ||
769 | |||
770 | drm_mode_config_init(dev); | ||
771 | dev->mode_config.funcs = &vmw_kms_funcs; | ||
772 | dev->mode_config.min_width = 1; | ||
773 | dev->mode_config.min_height = 1; | ||
774 | dev->mode_config.max_width = dev_priv->fb_max_width; | ||
775 | dev->mode_config.max_height = dev_priv->fb_max_height; | ||
776 | |||
777 | ret = vmw_kms_init_legacy_display_system(dev_priv); | ||
778 | |||
779 | return 0; | ||
780 | } | ||
781 | |||
782 | int vmw_kms_close(struct vmw_private *dev_priv) | ||
783 | { | ||
784 | /* | ||
785 | * Docs say we should take the lock before calling this function, | ||
786 | * but since it destroys encoders and our destructor calls | ||
787 | * drm_encoder_cleanup, which takes the lock, we would deadlock. | ||
788 | */ | ||
789 | drm_mode_config_cleanup(dev_priv->dev); | ||
790 | vmw_kms_close_legacy_display_system(dev_priv); | ||
791 | return 0; | ||
792 | } | ||
793 | |||
794 | int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, | ||
795 | struct drm_file *file_priv) | ||
796 | { | ||
797 | struct drm_vmw_cursor_bypass_arg *arg = data; | ||
798 | struct vmw_display_unit *du; | ||
799 | struct drm_mode_object *obj; | ||
800 | struct drm_crtc *crtc; | ||
801 | int ret = 0; | ||
802 | |||
803 | |||
804 | mutex_lock(&dev->mode_config.mutex); | ||
805 | if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { | ||
806 | |||
807 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | ||
808 | du = vmw_crtc_to_du(crtc); | ||
809 | du->hotspot_x = arg->xhot; | ||
810 | du->hotspot_y = arg->yhot; | ||
811 | } | ||
812 | |||
813 | mutex_unlock(&dev->mode_config.mutex); | ||
814 | return 0; | ||
815 | } | ||
816 | |||
817 | obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC); | ||
818 | if (!obj) { | ||
819 | ret = -EINVAL; | ||
820 | goto out; | ||
821 | } | ||
822 | |||
823 | crtc = obj_to_crtc(obj); | ||
824 | du = vmw_crtc_to_du(crtc); | ||
825 | |||
826 | du->hotspot_x = arg->xhot; | ||
827 | du->hotspot_y = arg->yhot; | ||
828 | |||
829 | out: | ||
830 | mutex_unlock(&dev->mode_config.mutex); | ||
831 | |||
832 | return ret; | ||
833 | } | ||
834 | |||
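/*
 * Save the current SVGA register state (width, height, bpp, depth, colour
 * masks, ...) so vmw_kms_restore_vga() can restore it later; on multimon
 * capable hardware, display 0 is first parked at 0x0 as described below.
 */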
835 | int vmw_kms_save_vga(struct vmw_private *vmw_priv) | ||
836 | { | ||
837 | /* | ||
838 | * Set up a single multimon monitor with a size | ||
839 | * of 0x0; this stops the UI from resizing when we | ||
840 | * change the framebuffer size. | ||
841 | */ | ||
842 | if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) { | ||
843 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); | ||
844 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); | ||
845 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); | ||
846 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0); | ||
847 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0); | ||
848 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0); | ||
849 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0); | ||
850 | vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); | ||
851 | } | ||
852 | |||
853 | vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH); | ||
854 | vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT); | ||
855 | vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL); | ||
856 | vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH); | ||
857 | vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR); | ||
858 | vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK); | ||
859 | vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK); | ||
860 | vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK); | ||
861 | |||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | int vmw_kms_restore_vga(struct vmw_private *vmw_priv) | ||
866 | { | ||
867 | vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width); | ||
868 | vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height); | ||
869 | vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp); | ||
870 | vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth); | ||
871 | vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo); | ||
872 | vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask); | ||
873 | vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask); | ||
874 | vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask); | ||
875 | |||
876 | /* TODO check for multimon */ | ||
877 | vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0); | ||
878 | |||
879 | return 0; | ||
880 | } | ||