author     Dave Airlie <airlied@redhat.com>  2010-05-31 21:32:06 -0400
committer  Dave Airlie <airlied@redhat.com>  2010-05-31 21:32:06 -0400
commit     c09a35028567ae2c11d627bf69134b87a3c0efae (patch)
tree       4cc50309b11cddb237e7905b3db9005381a4a63e
parent     4abe4389790d5f02569fbacdf035536ba84c7d44 (diff)
parent     1ca14e75caae504fdf957cf0c1c4f3aafc886a60 (diff)
Merge branch 'drm-vmware-fixes' into drm-testing
* drm-vmware-fixes:
drm/vmwgfx: Remove some leftover debug messages.
drm/vmwgfx: Print warnings in kernel log about bo pinning that fails.
drm/vmwgfx: Unpause overlay on update.
drm/vmwgfx: Some modesetting cleanups and fixes.
drm/vmwgfx: Don't use SVGA_REG_ENABLE in modesetting code.
drm/vmwgfx: Remove duplicate member from struct vmw_legacy_display_unit.
drm/vmwgfx: Reserve first part of VRAM for framebuffer.
drm/vmwgfx: Support older hardware.
drm/vmwgfx: Get connector status from detection function.
drm/vmwgfx: Add kernel throttling support. Bump minor.
drm/vmwgfx: Make sure to unpin old and pin new framebuffer.
drm/vmwgfx: Fix single framebuffer detection.
drm/vmwgfx: Assume larger framebuffer max size.
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile           |   2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c       |  17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h       |  32
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c   |   9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c        |  81
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c     | 173
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c      |  23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c       |  17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c       | 120
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c       | 106
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c   |   2
11 files changed, 433 insertions, 149 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 1a3cb6816d1..4505e17df3f 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
-	    vmwgfx_overlay.o
+	    vmwgfx_overlay.o vmwgfx_fence.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0c9c0811f42..7597323d5a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -318,6 +318,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_err3;
 	}
 
+	/* Need mmio memory to check for fifo pitchlock cap. */
+	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
+	    !vmw_fifo_have_pitchlock(dev_priv)) {
+		ret = -ENOSYS;
+		DRM_ERROR("Hardware has no pitchlock\n");
+		goto out_err4;
+	}
+
 	dev_priv->tdev = ttm_object_device_init
 		(dev_priv->mem_global_ref.object, 12);
 
@@ -399,8 +408,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 
-	DRM_INFO(VMWGFX_DRIVER_NAME " unload.\n");
-
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
 	vmw_fb_close(dev_priv);
@@ -546,7 +553,6 @@ static int vmw_master_create(struct drm_device *dev,
 {
 	struct vmw_master *vmaster;
 
-	DRM_INFO("Master create.\n");
 	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
 	if (unlikely(vmaster == NULL))
 		return -ENOMEM;
@@ -563,7 +569,6 @@ static void vmw_master_destroy(struct drm_device *dev,
 {
 	struct vmw_master *vmaster = vmw_master(master);
 
-	DRM_INFO("Master destroy.\n");
 	master->driver_priv = NULL;
 	kfree(vmaster);
 }
@@ -579,8 +584,6 @@ static int vmw_master_set(struct drm_device *dev,
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret = 0;
 
-	DRM_INFO("Master set.\n");
-
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
 		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -622,8 +625,6 @@ static void vmw_master_drop(struct drm_device *dev,
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	DRM_INFO("Master drop.\n");
-
 	/**
 	 * Make sure the master doesn't disappear while we have
 	 * it locked.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 356dc935ec1..1341adef408 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -41,7 +41,7 @@
 
 #define VMWGFX_DRIVER_DATE "20100209"
 #define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_MINOR 1
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -102,6 +102,13 @@ struct vmw_surface {
 	struct vmw_cursor_snooper snooper;
 };
 
+struct vmw_fence_queue {
+	struct list_head head;
+	struct timespec lag;
+	struct timespec lag_time;
+	spinlock_t lock;
+};
+
 struct vmw_fifo_state {
 	unsigned long reserved_size;
 	__le32 *dynamic_buffer;
@@ -115,6 +122,7 @@ struct vmw_fifo_state {
 	uint32_t capabilities;
 	struct mutex fifo_mutex;
 	struct rw_semaphore rwsem;
+	struct vmw_fence_queue fence_queue;
 };
 
 struct vmw_relocation {
@@ -179,6 +187,7 @@ struct vmw_private {
 	uint32_t vga_red_mask;
 	uint32_t vga_blue_mask;
 	uint32_t vga_green_mask;
+	uint32_t vga_pitchlock;
 
 	/*
 	 * Framebuffer info.
@@ -393,6 +402,7 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
+extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -441,6 +451,23 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
 			     uint32_t sequence,
 			     bool interruptible,
 			     unsigned long timeout);
+extern void vmw_update_sequence(struct vmw_private *dev_priv,
+				struct vmw_fifo_state *fifo_state);
+
+
+/**
+ * Rudimentary fence objects currently used only for throttling -
+ * vmwgfx_fence.c
+ */
+
+extern void vmw_fence_queue_init(struct vmw_fence_queue *queue);
+extern void vmw_fence_queue_takedown(struct vmw_fence_queue *queue);
+extern int vmw_fence_push(struct vmw_fence_queue *queue,
+			  uint32_t sequence);
+extern int vmw_fence_pull(struct vmw_fence_queue *queue,
+			  uint32_t signaled_sequence);
+extern int vmw_wait_lag(struct vmw_private *dev_priv,
+			struct vmw_fence_queue *queue, uint32_t us);
 
 /**
  * Kernel framebuffer - vmwgfx_fb.c
@@ -466,6 +493,9 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 			  struct ttm_object_file *tfile,
 			  struct ttm_buffer_object *bo,
 			  SVGA3dCmdHeader *header);
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+			unsigned width, unsigned height, unsigned pitch,
+			unsigned bbp, unsigned depth);
 
 /**
  * Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index dbd36b8910c..bdd67cf8331 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -669,6 +669,15 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		goto out_err;
 
 	vmw_apply_relocations(sw_context);
+
+	if (arg->throttle_us) {
+		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
+				   arg->throttle_us);
+
+		if (unlikely(ret != 0))
+			goto out_err;
+	}
+
 	vmw_fifo_commit(dev_priv, arg->command_size);
 
 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 7421aaad8d0..181f4722258 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -132,16 +132,14 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
 		return -EINVAL;
 	}
 
-	/* without multimon its hard to resize */
-	if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
-	    (var->xres != par->max_width ||
-	     var->yres != par->max_height)) {
-		DRM_ERROR("Tried to resize, but we don't have multimon\n");
+	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
+	    (var->xoffset != 0 || var->yoffset != 0)) {
+		DRM_ERROR("Can not handle panning without display topology\n");
 		return -EINVAL;
 	}
 
-	if (var->xres > par->max_width ||
-	    var->yres > par->max_height) {
+	if ((var->xoffset + var->xres) > par->max_width ||
+	    (var->yoffset + var->yres) > par->max_height) {
 		DRM_ERROR("Requested geom can not fit in framebuffer\n");
 		return -EINVAL;
 	}
@@ -154,8 +152,7 @@ static int vmw_fb_set_par(struct fb_info *info)
 	struct vmw_fb_par *par = info->par;
 	struct vmw_private *vmw_priv = par->vmw_priv;
 
-	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
+	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
@@ -164,18 +161,11 @@ static int vmw_fb_set_par(struct fb_info *info)
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
 
-		vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
-		vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
-		vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
-		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
-		vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
-		vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-		vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-		vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+		vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+				   info->fix.line_length,
+				   par->bpp, par->depth);
 
 		/* TODO check if pitch and offset changes */
-
-		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
@@ -183,13 +173,19 @@ static int vmw_fb_set_par(struct fb_info *info)
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
 	} else {
-		vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
-		vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
+		vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
+				   info->fix.line_length,
+				   par->bpp, par->depth);
 
-		/* TODO check if pitch and offset changes */
 	}
 
+	/* This is really helpful since if this fails the user
+	 * can probably not see anything on the screen.
+	 */
+	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);
+
 	return 0;
 }
 
@@ -416,48 +412,23 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
 	unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
 	int ret;
 
+	/* XXX These shouldn't be hardcoded. */
 	initial_width = 800;
 	initial_height = 600;
 
 	fb_bbp = 32;
 	fb_depth = 24;
 
-	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-		fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
-		fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
-	} else {
-		fb_width = min(vmw_priv->fb_max_width, initial_width);
-		fb_height = min(vmw_priv->fb_max_height, initial_height);
-	}
+	/* XXX As shouldn't these be as well. */
+	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
+	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
 
 	initial_width = min(fb_width, initial_width);
 	initial_height = min(fb_height, initial_height);
 
-	vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
-	vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
-	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
-	vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
-	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-
-	fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
+	fb_pitch = fb_width * fb_bbp / 8;
+	fb_size = fb_pitch * fb_height;
 	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
-	fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
-
-	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
-	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
-	DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
-	DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
-	DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
-	DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
-	DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
-	DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
-	DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
-	DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
-	DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
-	DRM_DEBUG("fb_pitch %u\n", fb_pitch);
-	DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
 
 	info = framebuffer_alloc(sizeof(*par), device);
 	if (!info)
@@ -659,6 +630,10 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 		goto err_unlock;
 
 	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
+
+	/* Could probably bug on */
+	WARN_ON(bo->offset != 0);
+
 	ttm_bo_unreserve(bo);
 err_unlock:
 	ttm_write_unlock(&vmw_priv->active_master->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
new file mode 100644
index 00000000000..61eacc1b5ca
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -0,0 +1,173 @@
+/**************************************************************************
+ *
+ * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "vmwgfx_drv.h"
+
+struct vmw_fence {
+	struct list_head head;
+	uint32_t sequence;
+	struct timespec submitted;
+};
+
+void vmw_fence_queue_init(struct vmw_fence_queue *queue)
+{
+	INIT_LIST_HEAD(&queue->head);
+	queue->lag = ns_to_timespec(0);
+	getrawmonotonic(&queue->lag_time);
+	spin_lock_init(&queue->lock);
+}
+
+void vmw_fence_queue_takedown(struct vmw_fence_queue *queue)
+{
+	struct vmw_fence *fence, *next;
+
+	spin_lock(&queue->lock);
+	list_for_each_entry_safe(fence, next, &queue->head, head) {
+		kfree(fence);
+	}
+	spin_unlock(&queue->lock);
+}
+
+int vmw_fence_push(struct vmw_fence_queue *queue,
+		   uint32_t sequence)
+{
+	struct vmw_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+	if (unlikely(!fence))
+		return -ENOMEM;
+
+	fence->sequence = sequence;
+	getrawmonotonic(&fence->submitted);
+	spin_lock(&queue->lock);
+	list_add_tail(&fence->head, &queue->head);
+	spin_unlock(&queue->lock);
+
+	return 0;
+}
+
+int vmw_fence_pull(struct vmw_fence_queue *queue,
+		   uint32_t signaled_sequence)
+{
+	struct vmw_fence *fence, *next;
+	struct timespec now;
+	bool updated = false;
+
+	spin_lock(&queue->lock);
+	getrawmonotonic(&now);
+
+	if (list_empty(&queue->head)) {
+		queue->lag = ns_to_timespec(0);
+		queue->lag_time = now;
+		updated = true;
+		goto out_unlock;
+	}
+
+	list_for_each_entry_safe(fence, next, &queue->head, head) {
+		if (signaled_sequence - fence->sequence > (1 << 30))
+			continue;
+
+		queue->lag = timespec_sub(now, fence->submitted);
+		queue->lag_time = now;
+		updated = true;
+		list_del(&fence->head);
+		kfree(fence);
+	}
+
+out_unlock:
+	spin_unlock(&queue->lock);
+
+	return (updated) ? 0 : -EBUSY;
+}
+
+static struct timespec vmw_timespec_add(struct timespec t1,
+					struct timespec t2)
+{
+	t1.tv_sec += t2.tv_sec;
+	t1.tv_nsec += t2.tv_nsec;
+	if (t1.tv_nsec >= 1000000000L) {
+		t1.tv_sec += 1;
+		t1.tv_nsec -= 1000000000L;
+	}
+
+	return t1;
+}
+
+static struct timespec vmw_fifo_lag(struct vmw_fence_queue *queue)
+{
+	struct timespec now;
+
+	spin_lock(&queue->lock);
+	getrawmonotonic(&now);
+	queue->lag = vmw_timespec_add(queue->lag,
+				      timespec_sub(now, queue->lag_time));
+	queue->lag_time = now;
+	spin_unlock(&queue->lock);
+	return queue->lag;
+}
+
+
+static bool vmw_lag_lt(struct vmw_fence_queue *queue,
+		       uint32_t us)
+{
+	struct timespec lag, cond;
+
+	cond = ns_to_timespec((s64) us * 1000);
+	lag = vmw_fifo_lag(queue);
+	return (timespec_compare(&lag, &cond) < 1);
+}
+
+int vmw_wait_lag(struct vmw_private *dev_priv,
+		 struct vmw_fence_queue *queue, uint32_t us)
+{
+	struct vmw_fence *fence;
+	uint32_t sequence;
+	int ret;
+
+	while (!vmw_lag_lt(queue, us)) {
+		spin_lock(&queue->lock);
+		if (list_empty(&queue->head))
+			sequence = atomic_read(&dev_priv->fence_seq);
+		else {
+			fence = list_first_entry(&queue->head,
+						 struct vmw_fence, head);
+			sequence = fence->sequence;
+		}
+		spin_unlock(&queue->lock);
+
+		ret = vmw_wait_fence(dev_priv, false, sequence, true,
+				     3*HZ);
+
+		if (unlikely(ret != 0))
+			return ret;
+
+		(void) vmw_fence_pull(queue, sequence);
+	}
+	return 0;
+}
+
+
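Note on the throttling scheme this series introduces: vmw_fifo_send_fence() timestamps every submitted fence and pushes it onto the new fence queue; when the host signals a sequence, vmw_fence_pull() records how long that work sat in the FIFO as accumulated "lag", and vmw_wait_lag() (reached from the execbuf ioctl when userspace passes a non-zero throttle_us) blocks until the lag drops below the requested bound. The sketch below is a simplified userspace model of that bookkeeping, for illustration only; it is not part of the patch, and the names model_push() and model_pull_lag_seconds() are invented.

```c
/* Simplified, hypothetical userspace model of the fence-lag bookkeeping. */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MODEL_QUEUE_LEN 64

struct model_fence {
	uint32_t sequence;		/* fence sequence number */
	struct timespec submitted;	/* when the fence was pushed */
	int valid;			/* still waiting to be signaled */
};

static struct model_fence model_queue[MODEL_QUEUE_LEN];

/* Submit: remember when this sequence entered the queue. */
static void model_push(uint32_t sequence)
{
	struct model_fence *f = &model_queue[sequence % MODEL_QUEUE_LEN];

	f->sequence = sequence;
	clock_gettime(CLOCK_MONOTONIC, &f->submitted);
	f->valid = 1;
}

/* Signal: lag is how long the signaled work sat in the queue. */
static double model_pull_lag_seconds(uint32_t signaled)
{
	struct timespec now;
	double lag = 0.0;
	int i;

	clock_gettime(CLOCK_MONOTONIC, &now);
	for (i = 0; i < MODEL_QUEUE_LEN; i++) {
		struct model_fence *f = &model_queue[i];

		/* Skip fences the device has not reached yet (wrap-safe). */
		if (!f->valid ||
		    (uint32_t)(signaled - f->sequence) > (1u << 30))
			continue;

		lag = (double)(now.tv_sec - f->submitted.tv_sec) +
		      (double)(now.tv_nsec - f->submitted.tv_nsec) / 1e9;
		f->valid = 0;
	}
	return lag;
}

int main(void)
{
	model_push(1);
	model_push(2);
	/* Pretend the host has signaled up to sequence 2. */
	printf("lag ~ %.6f s\n", model_pull_lag_seconds(2));
	return 0;
}
```

A real throttle then simply loops: while the accumulated lag exceeds the caller's budget, wait on the oldest queued fence and pull again, which is what vmw_wait_lag() does via vmw_wait_fence().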
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 39d43a01d84..e6a1eb7ea95 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -34,6 +34,9 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 
+	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+		return false;
+
 	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
 	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
 		return false;
@@ -48,6 +51,21 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 	return true;
 }
 
+bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	uint32_t caps;
+
+	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
+		return false;
+
+	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
+	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
+		return true;
+
+	return false;
+}
+
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
 	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
@@ -120,7 +138,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 	atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
 	iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
-
+	vmw_fence_queue_init(&fifo->fence_queue);
 	return vmw_fifo_send_fence(dev_priv, &dummy);
 out_err:
 	vfree(fifo->static_buffer);
@@ -159,6 +177,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 			  dev_priv->enable_state);
 
 	mutex_unlock(&dev_priv->hw_mutex);
+	vmw_fence_queue_takedown(&fifo->fence_queue);
 
 	if (likely(fifo->last_buffer != NULL)) {
 		vfree(fifo->last_buffer);
@@ -484,6 +503,8 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
 	fifo_state->last_buffer_add = true;
 	vmw_fifo_commit(dev_priv, bytes);
 	fifo_state->last_buffer_add = false;
+	(void) vmw_fence_push(&fifo_state->fence_queue, *sequence);
+	vmw_update_sequence(dev_priv, fifo_state);
 
 out_err:
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 4d7cb539386..e92298a6a38 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -64,22 +64,33 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
 	return (busy == 0);
 }
 
+void vmw_update_sequence(struct vmw_private *dev_priv,
+			 struct vmw_fifo_state *fifo_state)
+{
+	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+
+	uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+
+	if (dev_priv->last_read_sequence != sequence) {
+		dev_priv->last_read_sequence = sequence;
+		vmw_fence_pull(&fifo_state->fence_queue, sequence);
+	}
+}
 
 bool vmw_fence_signaled(struct vmw_private *dev_priv,
 			uint32_t sequence)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	struct vmw_fifo_state *fifo_state;
 	bool ret;
 
 	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
 		return true;
 
-	dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
+	fifo_state = &dev_priv->fifo;
+	vmw_update_sequence(dev_priv, fifo_state);
 	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
 		return true;
 
-	fifo_state = &dev_priv->fifo;
 	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
 	    vmw_fifo_idle(dev_priv, sequence))
 		return true;
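The comparisons above (last_read_sequence - sequence < VMW_FENCE_WRAP in vmw_fence_signaled(), and signaled_sequence - fence->sequence > (1 << 30) in vmw_fence_pull()) deliberately use unsigned subtraction instead of a plain ordered compare, so 32-bit sequence counters keep working after they overflow. A minimal illustration of the idea, assuming a hypothetical helper seq_signaled() and the textbook half-range threshold (the driver uses its own constants):

```c
/* Wrap-safe "has sequence X been reached?" check for 32-bit counters.
 * Hypothetical helper for illustration; not a function in the driver.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seq_signaled(uint32_t last_read, uint32_t sequence)
{
	/* True when 'sequence' is at or behind 'last_read', even across a
	 * wrap, as long as the two are less than half the range apart.
	 */
	return (uint32_t)(last_read - sequence) < (1u << 31);
}

int main(void)
{
	assert(seq_signaled(100, 50));		/* plainly older */
	assert(!seq_signaled(50, 100));		/* not reached yet */
	assert(seq_signaled(5, 0xfffffff0u));	/* older, across the wrap */
	assert(!seq_signaled(0xfffffff0u, 5));	/* newer, across the wrap */
	return 0;
}
```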
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index bbc7c4c30bc..b78dcf00185 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -30,6 +30,8 @@
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
 
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
@@ -326,6 +328,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
 struct vmw_framebuffer_surface {
 	struct vmw_framebuffer base;
 	struct vmw_surface *surface;
+	struct vmw_dma_buffer *buffer;
 	struct delayed_work d_work;
 	struct mutex work_lock;
 	bool present_fs;
@@ -500,8 +503,8 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	vfbs->base.base.depth = 24;
 	vfbs->base.base.width = width;
 	vfbs->base.base.height = height;
-	vfbs->base.pin = NULL;
-	vfbs->base.unpin = NULL;
+	vfbs->base.pin = &vmw_surface_dmabuf_pin;
+	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
 	vfbs->surface = surface;
 	mutex_init(&vfbs->work_lock);
 	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
@@ -589,6 +592,40 @@ static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
 	.create_handle = vmw_framebuffer_create_handle,
 };
 
+static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+{
+	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+	struct vmw_framebuffer_surface *vfbs =
+		vmw_framebuffer_to_vfbs(&vfb->base);
+	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
+	int ret;
+
+	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
+	if (unlikely(vfbs->buffer == NULL))
+		return -ENOMEM;
+
+	vmw_overlay_pause_all(dev_priv);
+	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
+			      &vmw_vram_ne_placement,
+			      false, &vmw_dmabuf_bo_free);
+	vmw_overlay_resume_all(dev_priv);
+
+	return ret;
+}
+
+static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+{
+	struct ttm_buffer_object *bo;
+	struct vmw_framebuffer_surface *vfbs =
+		vmw_framebuffer_to_vfbs(&vfb->base);
+
+	bo = &vfbs->buffer->base;
+	ttm_bo_unref(&bo);
+	vfbs->buffer = NULL;
+
+	return 0;
+}
+
 static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -596,33 +633,15 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 		vmw_framebuffer_to_vfbd(&vfb->base);
 	int ret;
 
+
 	vmw_overlay_pause_all(dev_priv);
 
 	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
 
-	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-		vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-
-		vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
-		vmw_write(dev_priv, SVGA_REG_WIDTH, vfb->base.width);
-		vmw_write(dev_priv, SVGA_REG_HEIGHT, vfb->base.height);
-		vmw_write(dev_priv, SVGA_REG_BITS_PER_PIXEL, vfb->base.bits_per_pixel);
-		vmw_write(dev_priv, SVGA_REG_DEPTH, vfb->base.depth);
-		vmw_write(dev_priv, SVGA_REG_RED_MASK, 0x00ff0000);
-		vmw_write(dev_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
-		vmw_write(dev_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
-	} else
-		WARN_ON(true);
-
 	vmw_overlay_resume_all(dev_priv);
 
+	WARN_ON(ret != 0);
+
 	return 0;
 }
 
@@ -668,7 +687,7 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 
 	/* XXX get the first 3 from the surface info */
 	vfbd->base.base.bits_per_pixel = 32;
-	vfbd->base.base.pitch = width * 32 / 4;
+	vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
 	vfbd->base.base.depth = 24;
 	vfbd->base.base.width = width;
 	vfbd->base.base.height = height;
@@ -765,8 +784,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
 	dev->mode_config.funcs = &vmw_kms_funcs;
 	dev->mode_config.min_width = 1;
 	dev->mode_config.min_height = 1;
-	dev->mode_config.max_width = dev_priv->fb_max_width;
-	dev->mode_config.max_height = dev_priv->fb_max_height;
+	/* assumed largest fb size */
+	dev->mode_config.max_width = 8192;
+	dev->mode_config.max_height = 8192;
 
 	ret = vmw_kms_init_legacy_display_system(dev_priv);
 
@@ -826,24 +846,25 @@ out:
 	return ret;
 }
 
-int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+void vmw_kms_write_svga(struct vmw_private *vmw_priv,
+			unsigned width, unsigned height, unsigned pitch,
+			unsigned bbp, unsigned depth)
 {
-	/*
-	 * setup a single multimon monitor with the size
-	 * of 0x0, this stops the UI from resizing when we
-	 * change the framebuffer size
-	 */
-	if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
-		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
-	}
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
+	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
+	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
+	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
+	vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
+	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
+	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
+	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
+}
 
+int vmw_kms_save_vga(struct vmw_private *vmw_priv)
+{
 	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
 	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
 	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
@@ -852,6 +873,12 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
 	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
 	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
 	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_priv->vga_pitchlock =
+			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		vmw_priv->vga_pitchlock =
+			ioread32(vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 
 	return 0;
 }
@@ -866,9 +893,12 @@ int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
 	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
 	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
 	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
-
-	/* TODO check for multimon */
-	vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
+	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
+		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
+			  vmw_priv->vga_pitchlock);
+	else if (vmw_fifo_have_pitchlock(vmw_priv))
+		iowrite32(vmw_priv->vga_pitchlock,
+			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 90891593bf6..f7094dde18f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -38,6 +38,7 @@ struct vmw_legacy_display {
 	struct list_head active;
 
 	unsigned num_active;
+	unsigned last_num_active;
 
 	struct vmw_framebuffer *fb;
 };
@@ -49,8 +50,6 @@ struct vmw_legacy_display_unit {
 	struct vmw_display_unit base;
 
 	struct list_head active;
-
-	unsigned unit;
 };
 
 static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
@@ -88,23 +87,44 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 {
 	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
 	struct vmw_legacy_display_unit *entry;
-	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb = NULL;
+	struct drm_crtc *crtc = NULL;
 	int i = 0;
 
-	/* to stop the screen from changing size on resize */
-	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 0);
-	for (i = 0; i < lds->num_active; i++) {
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
-		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+	/* If there is no display topology the host just assumes
+	 * that the guest will set the same layout as the host.
+	 */
+	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
+		int w = 0, h = 0;
+		list_for_each_entry(entry, &lds->active, active) {
+			crtc = &entry->base.crtc;
+			w = max(w, crtc->x + crtc->mode.hdisplay);
+			h = max(h, crtc->y + crtc->mode.vdisplay);
+			i++;
+		}
+
+		if (crtc == NULL)
+			return 0;
+		fb = entry->base.crtc.fb;
+
+		vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+				   fb->bits_per_pixel, fb->depth);
+
+		return 0;
+	}
+
+	if (!list_empty(&lds->active)) {
+		entry = list_entry(lds->active.next, typeof(*entry), active);
+		fb = entry->base.crtc.fb;
+
+		vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+				   fb->bits_per_pixel, fb->depth);
 	}
 
-	/* Now set the mode */
-	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS, lds->num_active);
+	/* Make sure we always show something. */
+	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
+		  lds->num_active ? lds->num_active : 1);
+
 	i = 0;
 	list_for_each_entry(entry, &lds->active, active) {
 		crtc = &entry->base.crtc;
@@ -120,6 +140,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 		i++;
 	}
 
+	BUG_ON(i != lds->num_active);
+
+	lds->last_num_active = lds->num_active;
+
 	return 0;
 }
 
@@ -130,6 +154,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
 	if (list_empty(&ldu->active))
 		return 0;
 
+	/* Must init otherwise list_empty(&ldu->active) will not work. */
 	list_del_init(&ldu->active);
 	if (--(ld->num_active) == 0) {
 		BUG_ON(!ld->fb);
@@ -149,24 +174,29 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
 	struct vmw_legacy_display_unit *entry;
 	struct list_head *at;
 
+	BUG_ON(!ld->num_active && ld->fb);
+	if (vfb != ld->fb) {
+		if (ld->fb && ld->fb->unpin)
+			ld->fb->unpin(ld->fb);
+		if (vfb->pin)
+			vfb->pin(vfb);
+		ld->fb = vfb;
+	}
+
 	if (!list_empty(&ldu->active))
 		return 0;
 
 	at = &ld->active;
 	list_for_each_entry(entry, &ld->active, active) {
-		if (entry->unit > ldu->unit)
+		if (entry->base.unit > ldu->base.unit)
 			break;
 
 		at = &entry->active;
 	}
 
 	list_add(&ldu->active, at);
-	if (ld->num_active++ == 0) {
-		BUG_ON(ld->fb);
-		if (vfb->pin)
-			vfb->pin(vfb);
-		ld->fb = vfb;
-	}
+
+	ld->num_active++;
 
 	return 0;
 }
@@ -208,6 +238,8 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 
 	/* ldu only supports one fb active at the time */
 	if (dev_priv->ldu_priv->fb && vfb &&
+	    !(dev_priv->ldu_priv->num_active == 1 &&
+	      !list_empty(&ldu->active)) &&
 	    dev_priv->ldu_priv->fb != vfb) {
 		DRM_ERROR("Multiple framebuffers not supported\n");
 		return -EINVAL;
@@ -443,18 +475,16 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 	if (!ldu)
 		return -ENOMEM;
 
-	ldu->unit = unit;
+	ldu->base.unit = unit;
 	crtc = &ldu->base.crtc;
 	encoder = &ldu->base.encoder;
 	connector = &ldu->base.connector;
 
+	INIT_LIST_HEAD(&ldu->active);
+
 	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
-	/* Initial status */
-	if (unit == 0)
-		connector->status = connector_status_connected;
-	else
-		connector->status = connector_status_disconnected;
+	connector->status = vmw_ldu_connector_detect(connector);
 
 	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
 			 DRM_MODE_ENCODER_LVDS);
@@ -462,8 +492,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 	encoder->possible_crtcs = (1 << unit);
 	encoder->possible_clones = 0;
 
-	INIT_LIST_HEAD(&ldu->active);
-
 	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);
 
 	drm_connector_attach_property(connector,
@@ -487,18 +515,22 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 
 	INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
 	dev_priv->ldu_priv->num_active = 0;
+	dev_priv->ldu_priv->last_num_active = 0;
 	dev_priv->ldu_priv->fb = NULL;
 
 	drm_mode_create_dirty_info_property(dev_priv->dev);
 
 	vmw_ldu_init(dev_priv, 0);
-	vmw_ldu_init(dev_priv, 1);
-	vmw_ldu_init(dev_priv, 2);
-	vmw_ldu_init(dev_priv, 3);
-	vmw_ldu_init(dev_priv, 4);
-	vmw_ldu_init(dev_priv, 5);
-	vmw_ldu_init(dev_priv, 6);
-	vmw_ldu_init(dev_priv, 7);
+	/* for old hardware without multimon only enable one display */
+	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
+		vmw_ldu_init(dev_priv, 1);
+		vmw_ldu_init(dev_priv, 2);
+		vmw_ldu_init(dev_priv, 3);
+		vmw_ldu_init(dev_priv, 4);
+		vmw_ldu_init(dev_priv, 5);
+		vmw_ldu_init(dev_priv, 6);
+		vmw_ldu_init(dev_priv, 7);
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index ad566c85b07..df2036ed18d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -358,6 +358,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
 	if (stream->buf != buf)
 		stream->buf = vmw_dmabuf_reference(buf);
 	stream->saved = *arg;
+	/* stream is no longer stopped/paused */
+	stream->paused = false;
 
 	return 0;
 }