Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c |  5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h |  3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 84
3 files changed, 84 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index f3e481f9aa8..201c34d1f3e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -597,6 +597,8 @@ static void vmw_lastclose(struct drm_device *dev)
 static void vmw_master_init(struct vmw_master *vmaster)
 {
 	ttm_lock_init(&vmaster->lock);
+	INIT_LIST_HEAD(&vmaster->fb_surf);
+	mutex_init(&vmaster->fb_surf_mutex);
 }
 
 static int vmw_master_create(struct drm_device *dev,
@@ -608,7 +610,7 @@ static int vmw_master_create(struct drm_device *dev,
 	if (unlikely(vmaster == NULL))
 		return -ENOMEM;
 
-	ttm_lock_init(&vmaster->lock);
+	vmw_master_init(vmaster);
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 	master->driver_priv = vmaster;
 
@@ -699,6 +701,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+	vmw_kms_idle_workqueues(vmaster);
 
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
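
The vmwgfx_drv.c hunks establish an ordering rule for VT switch: vmw_master_drop() first takes the TTM lock in exclusive (VT) mode and only then calls vmw_kms_idle_workqueues(), so no deferred present can reach the fifo once it is being taken down. A minimal standalone sketch of that teardown ordering follows; the names (example_master, example_master_drop) are illustrative and not part of the driver.

#include <linux/rwsem.h>
#include <linux/workqueue.h>

/* Illustrative per-master state: an exclusive lock gating command
 * submission (playing the role of the TTM lock) plus one deferred
 * "present" work item. */
struct example_master {
	struct rw_semaphore cmd_lock;
	struct delayed_work present_work;
};

static void example_master_drop(struct example_master *m)
{
	/* 1) Block new submitters first; vmw_master_drop() does this
	 *    with ttm_vt_lock() on vmaster->lock. */
	down_write(&m->cmd_lock);

	/* 2) Only now drain deferred work (vmw_kms_idle_workqueues());
	 *    nothing it would have presented can race with the teardown.
	 *    Note that the handler must not take cmd_lock itself, or this
	 *    would deadlock -- the same constraint the patch documents in
	 *    vmw_framebuffer_present_fs_callback(). */
	cancel_delayed_work_sync(&m->present_work);

	/* ... the fifo can be taken down safely here ... */

	up_write(&m->cmd_lock);
}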
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 132cc248d22..0ab53d98310 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -151,6 +151,8 @@ struct vmw_overlay;
 
 struct vmw_master {
 	struct ttm_lock lock;
+	struct mutex fb_surf_mutex;
+	struct list_head fb_surf;
 };
 
 struct vmw_vga_topology_state {
@@ -519,6 +521,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
 			unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 073b3e1c9cc..82bd3d8c0e4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -332,18 +332,55 @@ struct vmw_framebuffer_surface {
 	struct delayed_work d_work;
 	struct mutex work_lock;
 	bool present_fs;
+	struct list_head head;
+	struct drm_master *master;
 };
 
+/**
+ * vmw_kms_idle_workqueues - Flush workqueues on this master
+ *
+ * @vmaster - Pointer identifying the master, for the surfaces of which
+ * we idle the dirty work queues.
+ *
+ * This function should be called with the ttm lock held in exclusive mode
+ * to idle all dirty work queues before the fifo is taken down.
+ *
+ * The work task may actually requeue itself, but after the flush returns we're
+ * sure that there's nothing to present, since the ttm lock is held in
+ * exclusive mode, so the fifo will never get used.
+ */
+
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
+{
+	struct vmw_framebuffer_surface *entry;
+
+	mutex_lock(&vmaster->fb_surf_mutex);
+	list_for_each_entry(entry, &vmaster->fb_surf, head) {
+		if (cancel_delayed_work_sync(&entry->d_work))
+			(void) entry->d_work.work.func(&entry->d_work.work);
+
+		(void) cancel_delayed_work_sync(&entry->d_work);
+	}
+	mutex_unlock(&vmaster->fb_surf_mutex);
+}
+
 void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
-	struct vmw_framebuffer_surface *vfb =
+	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
+	struct vmw_master *vmaster = vmw_master(vfbs->master);
+
 
-	cancel_delayed_work_sync(&vfb->d_work);
+	mutex_lock(&vmaster->fb_surf_mutex);
+	list_del(&vfbs->head);
+	mutex_unlock(&vmaster->fb_surf_mutex);
+
+	cancel_delayed_work_sync(&vfbs->d_work);
+	drm_master_put(&vfbs->master);
 	drm_framebuffer_cleanup(framebuffer);
-	vmw_surface_unreference(&vfb->surface);
+	vmw_surface_unreference(&vfbs->surface);
 
-	kfree(framebuffer);
+	kfree(vfbs);
 }
 
 static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
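
The flush in vmw_kms_idle_workqueues() is deliberately cancel/run/cancel rather than a plain flush: cancel_delayed_work_sync() returns true when the work was still pending, in which case the handler is invoked once by hand so the pending present is emitted rather than dropped, and a second cancel catches the case where that invocation re-armed the delayed work. A self-contained sketch of the same pattern, using hypothetical names (struct example_fb, drain_pending_present) rather than the driver's:

#include <linux/workqueue.h>

struct example_fb {
	struct delayed_work d_work;	/* its handler may re-arm the work */
};

static void drain_pending_present(struct example_fb *fb)
{
	/*
	 * cancel_delayed_work_sync() returns true if the work was still
	 * pending; run the handler once directly so the update it was
	 * scheduled to perform is not silently lost.
	 */
	if (cancel_delayed_work_sync(&fb->d_work))
		fb->d_work.work.func(&fb->d_work.work);

	/* The handler may have rescheduled itself; cancel once more so the
	 * work item is guaranteed idle when we return. */
	cancel_delayed_work_sync(&fb->d_work);
}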
@@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
 		SVGA3dCopyRect cr;
 	} *cmd;
 
+	/**
+	 * Strictly we should take the ttm_lock in read mode before accessing
+	 * the fifo, to make sure the fifo is present and up. However,
+	 * instead we flush all workqueues under the ttm lock in exclusive mode
+	 * before taking down the fifo.
+	 */
 	mutex_lock(&vfbs->work_lock);
 	if (!vfbs->present_fs)
 		goto out_unlock;
@@ -398,12 +441,14 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 				  unsigned num_clips)
 {
 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
 	struct vmw_surface *surf = vfbs->surface;
 	struct drm_clip_rect norect;
 	SVGA3dCopyRect *cr;
 	int i, inc = 1;
+	int ret;
 
 	struct {
 		SVGA3dCmdHeader header;
@@ -411,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 		SVGA3dCopyRect cr;
 	} *cmd;
 
+	if (unlikely(vfbs->master != file_priv->master))
+		return -EINVAL;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
 	if (!num_clips ||
 	    !(dev_priv->fifo.capabilities &
 	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
@@ -426,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 			 */
 			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
 		}
+		ttm_read_unlock(&vmaster->lock);
 		return 0;
 	}
 
@@ -443,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Fifo reserve failed.\n");
+		ttm_read_unlock(&vmaster->lock);
 		return -ENOMEM;
 	}
 
@@ -462,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 	}
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
-
+	ttm_read_unlock(&vmaster->lock);
 	return 0;
 }
 
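
With this change, every fifo access in the surface dirty path is bracketed by ttm_read_lock()/ttm_read_unlock(), and the lock is released on each return path: the screen-object fallback, the failed fifo reserve, and the normal commit. A reduced sketch of that control flow, with placeholder names (example_dev, example_dirty, example_fifo_reserve) and an ordinary mutex standing in for the TTM read lock:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

/* Stand-ins for the driver objects; only the control flow matters here.
 * submit_lock is assumed to have been set up with mutex_init(). */
struct example_dev {
	struct mutex submit_lock;	/* plays the role of the TTM read lock */
	bool have_screen_object;
};

static void *example_fifo_reserve(struct example_dev *dev, size_t bytes)
{
	static char fifo_space[128];

	/* Pretend reservation can fail, like vmw_fifo_reserve(). */
	return bytes <= sizeof(fifo_space) ? fifo_space : NULL;
}

static int example_dirty(struct example_dev *dev)
{
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev->submit_lock);
	if (ret)
		return ret;

	if (!dev->have_screen_object) {
		/* fallback path: schedule a full-screen present instead */
		mutex_unlock(&dev->submit_lock);	/* exit 1 */
		return 0;
	}

	cmd = example_fifo_reserve(dev, 64);
	if (!cmd) {
		mutex_unlock(&dev->submit_lock);	/* exit 2 */
		return -ENOMEM;
	}

	/* ... fill and commit the command ... */

	mutex_unlock(&dev->submit_lock);		/* exit 3 */
	return 0;
}

The patch unlocks at each return site rather than funnelling the error paths through a single label; that keeps the hunks small, but any early return added later has to remember the matching ttm_read_unlock().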
@@ -473,6 +527,7 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 };
 
 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+					   struct drm_file *file_priv,
 					   struct vmw_surface *surface,
 					   struct vmw_framebuffer **out,
 					   const struct drm_mode_fb_cmd
@@ -482,6 +537,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	struct drm_device *dev = dev_priv->dev;
 	struct vmw_framebuffer_surface *vfbs;
 	enum SVGA3dSurfaceFormat format;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
 	/*
@@ -546,8 +602,14 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	vfbs->base.pin = &vmw_surface_dmabuf_pin;
 	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
 	vfbs->surface = surface;
+	vfbs->master = drm_master_get(file_priv->master);
 	mutex_init(&vfbs->work_lock);
+
+	mutex_lock(&vmaster->fb_surf_mutex);
 	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
+	list_add_tail(&vfbs->head, &vmaster->fb_surf);
+	mutex_unlock(&vmaster->fb_surf_mutex);
+
 	*out = &vfbs->base;
 
 	return 0;
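
vmw_kms_new_framebuffer_surface() now takes file_priv, grabs a reference to the creating master with drm_master_get(), and registers the framebuffer on that master's fb_surf list under fb_surf_mutex; vmw_framebuffer_surface_destroy() removes it under the same mutex before dropping the reference. A minimal sketch of that register/unregister pairing, with hypothetical names (example_register_fb, example_unregister_fb) rather than the driver's:

#include <linux/list.h>
#include <linux/mutex.h>

struct example_master {
	struct mutex fb_surf_mutex;	/* protects fb_surf */
	struct list_head fb_surf;	/* all surface framebuffers of this master */
};

struct example_fb {
	struct list_head head;		/* entry in example_master::fb_surf */
	struct example_master *master;
};

/* Called at framebuffer creation, after the master has been referenced. */
static void example_register_fb(struct example_master *m, struct example_fb *fb)
{
	fb->master = m;

	mutex_lock(&m->fb_surf_mutex);
	list_add_tail(&fb->head, &m->fb_surf);
	mutex_unlock(&m->fb_surf_mutex);
}

/* Called from the destroy path, before the master reference is dropped. */
static void example_unregister_fb(struct example_fb *fb)
{
	struct example_master *m = fb->master;

	mutex_lock(&m->fb_surf_mutex);
	list_del(&fb->head);
	mutex_unlock(&m->fb_surf_mutex);
}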
@@ -590,13 +652,19 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 				 unsigned num_clips)
 {
 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct drm_clip_rect norect;
+	int ret;
 	struct {
 		uint32_t header;
 		SVGAFifoCmdUpdate body;
 	} *cmd;
 	int i, increment = 1;
 
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
 	if (!num_clips) {
 		num_clips = 1;
 		clips = &norect;
@@ -611,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Fifo reserve failed.\n");
+		ttm_read_unlock(&vmaster->lock);
 		return -ENOMEM;
 	}
 
@@ -623,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 	}
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
+	ttm_read_unlock(&vmaster->lock);
 
 	return 0;
 }
@@ -795,8 +865,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	if (!surface->scanout)
 		goto err_not_scanout;
 
-	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
-					      mode_cmd);
+	ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
+					      &vfb, mode_cmd);
 
 	/* vmw_user_surface_lookup takes one ref so does new_fb */
 	vmw_surface_unreference(&surface);
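
Finally, vmw_kms_fb_create() passes file_priv down so the framebuffer can record the master that created it, and vmw_framebuffer_surface_dirty() rejects requests whose file_priv->master differs from the stored one. A stripped-down sketch of that ownership check; all names here are placeholders, not driver symbols:

#include <linux/errno.h>

struct example_master;

struct example_file {
	struct example_master *master;	/* master this client currently belongs to */
};

struct example_fb {
	struct example_master *master;	/* master that created the framebuffer */
};

/* Dirty-style entry point: only the creating master may flush this fb. */
static int example_fb_dirty(struct example_fb *fb, struct example_file *file)
{
	if (fb->master != file->master)
		return -EINVAL;

	/* ... take the read lock and submit the update ... */
	return 0;
}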