author     Dave Airlie <airlied@redhat.com>   2010-10-05 21:47:56 -0400
committer  Dave Airlie <airlied@redhat.com>   2010-10-05 21:47:56 -0400
commit     e6b46ee712b92db1cc2449cf4f65bc635366cad4
tree       522f17796797efce50841c8bd2dae6f63025a8c2 /drivers
parent     fb7ba2114bcd8bb51640c20bc68f89164b29b9ed
parent     8aea528736bf83ba0cdde67a3c0ca0250581eade
Merge branch 'drm-vmware-next' into drm-core-next
* drm-vmware-next:
drm/vmwgfx: Bump minor and driver date
drm/vmwgfx: Save at least one screen layout
drm/vmwgfx: Add modinfo version
drm/vmwgfx: Add a parameter to get the max fb size
drm/vmwgfx: Don't flush fb if we're in the suspended state.
drm/vmwgfx: Prune modes based on available VRAM size
drm/vmwgfx: Take the ttm lock around the dirty ioctl
drm: vmwgfx: Add a struct drm_file parameter to the dirty framebuffer callback
drm/vmwgfx: Add new-style PM hooks to improve hibernation behavior
drm/vmwgfx: Fix ACPI S3 & S4 functionality.
drm/vmwgfx: Really support other depths than 32
Diffstat (limited to 'drivers')
 drivers/gpu/drm/drm_crtc.c            |   3
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c   | 112
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h   |  11
 drivers/gpu/drm/vmwgfx/vmwgfx_fb.c    |  10
 drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c |   3
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c   | 200
 drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c   |  28
 7 files changed, 315 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 37e0b4fa482a..6985cb1da72c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1854,7 +1854,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 	}
 
 	if (fb->funcs->dirty) {
-		ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+		ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+				       clips, num_clips);
 	} else {
 		ret = -ENOSYS;
 		goto out_err2;
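
The drm_crtc.c change above widens the drm_framebuffer_funcs.dirty hook so the ioctl can hand the calling struct drm_file down to the driver. Every driver that implements .dirty has to adopt the new signature. A minimal sketch of what that looks like for some other driver follows; the foo_* names are hypothetical and only illustrate the callback shape, not code from this series:

/* Headers as used by in-tree DRM drivers of this era (assumed). */
#include "drmP.h"
#include "drm_crtc.h"

static int foo_framebuffer_dirty(struct drm_framebuffer *fb,
				 struct drm_file *file_priv,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	/*
	 * file_priv identifies the caller, so per-master state can be
	 * reached here, much like vmwgfx does below with
	 * vmw_master(file_priv->master).
	 */
	return 0;
}

static struct drm_framebuffer_funcs foo_fb_funcs = {
	.dirty = foo_framebuffer_dirty,
};

vmwgfx uses the extra argument to take its per-master TTM lock around the dirty path, as the vmwgfx_kms.c hunks further down show.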
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 5c845b6ec492..f2942b3c59c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -597,6 +597,8 @@ static void vmw_lastclose(struct drm_device *dev)
 static void vmw_master_init(struct vmw_master *vmaster)
 {
 	ttm_lock_init(&vmaster->lock);
+	INIT_LIST_HEAD(&vmaster->fb_surf);
+	mutex_init(&vmaster->fb_surf_mutex);
 }
 
 static int vmw_master_create(struct drm_device *dev,
@@ -608,7 +610,7 @@ static int vmw_master_create(struct drm_device *dev,
 	if (unlikely(vmaster == NULL))
 		return -ENOMEM;
 
-	ttm_lock_init(&vmaster->lock);
+	vmw_master_init(vmaster);
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
 	master->driver_priv = vmaster;
 
@@ -699,6 +701,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
+	vmw_kms_idle_workqueues(vmaster);
 
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -751,15 +754,16 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 		 * Buffer contents is moved to swappable memory.
 		 */
 		ttm_bo_swapout_all(&dev_priv->bdev);
+
 		break;
 	case PM_POST_HIBERNATION:
 	case PM_POST_SUSPEND:
+	case PM_POST_RESTORE:
 		ttm_suspend_unlock(&vmaster->lock);
+
 		break;
 	case PM_RESTORE_PREPARE:
 		break;
-	case PM_POST_RESTORE:
-		break;
 	default:
 		break;
 	}
@@ -770,21 +774,98 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
  * These might not be needed with the virtual SVGA device.
  */
 
-int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 {
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	if (dev_priv->num_3d_resources != 0) {
+		DRM_INFO("Can't suspend or hibernate "
+			 "while 3D resources are active.\n");
+		return -EBUSY;
+	}
+
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_set_power_state(pdev, PCI_D3hot);
 	return 0;
 }
 
-int vmw_pci_resume(struct pci_dev *pdev)
+static int vmw_pci_resume(struct pci_dev *pdev)
 {
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 	return pci_enable_device(pdev);
 }
 
+static int vmw_pm_suspend(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct pm_message dummy;
+
+	dummy.event = 0;
+
+	return vmw_pci_suspend(pdev, dummy);
+}
+
+static int vmw_pm_resume(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+
+	return vmw_pci_resume(pdev);
+}
+
+static int vmw_pm_prepare(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	/**
+	 * Release 3d reference held by fbdev and potentially
+	 * stop fifo.
+	 */
+	dev_priv->suspended = true;
+	if (dev_priv->enable_fb)
+		vmw_3d_resource_dec(dev_priv);
+
+	if (dev_priv->num_3d_resources != 0) {
+
+		DRM_INFO("Can't suspend or hibernate "
+			 "while 3D resources are active.\n");
+
+		if (dev_priv->enable_fb)
+			vmw_3d_resource_inc(dev_priv);
+		dev_priv->suspended = false;
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void vmw_pm_complete(struct device *kdev)
+{
+	struct pci_dev *pdev = to_pci_dev(kdev);
+	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	/**
+	 * Reclaim 3d reference held by fbdev and potentially
+	 * start fifo.
+	 */
+	if (dev_priv->enable_fb)
+		vmw_3d_resource_inc(dev_priv);
+
+	dev_priv->suspended = false;
+}
+
+static const struct dev_pm_ops vmw_pm_ops = {
+	.prepare = vmw_pm_prepare,
+	.complete = vmw_pm_complete,
+	.suspend = vmw_pm_suspend,
+	.resume = vmw_pm_resume,
+};
+
 static struct drm_driver driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
 	DRIVER_MODESET,
@@ -818,15 +899,16 @@ static struct drm_driver driver = {
 #if defined(CONFIG_COMPAT)
 		.compat_ioctl = drm_compat_ioctl,
 #endif
 	},
 	.pci_driver = {
		.name = VMWGFX_DRIVER_NAME,
		.id_table = vmw_pci_id_list,
		.probe = vmw_probe,
		.remove = vmw_remove,
-		.suspend = vmw_pci_suspend,
-		.resume = vmw_pci_resume
-	},
+		.driver = {
+			.pm = &vmw_pm_ops
+		}
+	},
 	.name = VMWGFX_DRIVER_NAME,
 	.desc = VMWGFX_DRIVER_DESC,
 	.date = VMWGFX_DRIVER_DATE,
@@ -860,3 +942,7 @@ module_exit(vmwgfx_exit);
 MODULE_AUTHOR("VMware Inc. and others");
 MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
 MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
+	       __stringify(VMWGFX_DRIVER_MINOR) "."
+	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
+	       "0");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 58de6393f611..9d55fa8cd0fe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -39,9 +39,9 @@
 #include "ttm/ttm_execbuf_util.h"
 #include "ttm/ttm_module.h"
 
-#define VMWGFX_DRIVER_DATE "20100209"
+#define VMWGFX_DRIVER_DATE "20100927"
 #define VMWGFX_DRIVER_MAJOR 1
-#define VMWGFX_DRIVER_MINOR 2
+#define VMWGFX_DRIVER_MINOR 4
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -151,6 +151,8 @@ struct vmw_overlay;
 
 struct vmw_master {
 	struct ttm_lock lock;
+	struct mutex fb_surf_mutex;
+	struct list_head fb_surf;
 };
 
 struct vmw_vga_topology_state {
@@ -286,6 +288,7 @@ struct vmw_private {
 	struct vmw_master *active_master;
 	struct vmw_master fbdev_master;
 	struct notifier_block pm_nb;
+	bool suspended;
 
 	struct mutex release_mutex;
 	uint32_t num_3d_resources;
@@ -518,6 +521,10 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
 			unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+				uint32_t pitch,
+				uint32_t height);
 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 
 /**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 409e172f4abf..b27a9f2887d2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -144,6 +144,13 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
 		return -EINVAL;
 	}
 
+	if (!vmw_kms_validate_mode_vram(vmw_priv,
+					info->fix.line_length,
+					var->yoffset + var->yres)) {
+		DRM_ERROR("Requested geom can not fit in framebuffer\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -205,6 +212,9 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
 		SVGAFifoCmdUpdate body;
 	} *cmd;
 
+	if (vmw_priv->suspended)
+		return;
+
 	spin_lock_irqsave(&par->dirty.lock, flags);
 	if (!par->dirty.active) {
 		spin_unlock_irqrestore(&par->dirty.lock, flags);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 1c7a316454d8..570d57775a58 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -54,6 +54,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 	case DRM_VMW_PARAM_FIFO_CAPS:
 		param->value = dev_priv->fifo.capabilities;
 		break;
+	case DRM_VMW_PARAM_MAX_FB_SIZE:
+		param->value = dev_priv->vram_size;
+		break;
 	default:
 		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
 			  param->param);
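
The new DRM_VMW_PARAM_MAX_FB_SIZE parameter reports dev_priv->vram_size, so user space can find out how large a framebuffer it may request before trying to create one. A hedged user-space sketch using libdrm's drmCommandWriteRead(); the argument struct and parameter names are taken from the vmwgfx_drm.h UAPI header, and error handling is trimmed:

#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static uint64_t vmw_query_max_fb_size(int fd)
{
	struct drm_vmw_getparam_arg arg = { 0 };

	arg.param = DRM_VMW_PARAM_MAX_FB_SIZE;
	if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)) != 0)
		return 0;	/* parameter not known to this kernel */

	return arg.value;	/* VRAM size, in bytes */
}

A framebuffer whose pitch * height exceeds this value will be refused by the kernel-side checks added in vmwgfx_kms.c below.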
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e882ba099f0c..87c6e6156d7d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -332,18 +332,55 @@ struct vmw_framebuffer_surface {
 	struct delayed_work d_work;
 	struct mutex work_lock;
 	bool present_fs;
+	struct list_head head;
+	struct drm_master *master;
 };
 
+/**
+ * vmw_kms_idle_workqueues - Flush workqueues on this master
+ *
+ * @vmaster - Pointer identifying the master, for the surfaces of which
+ * we idle the dirty work queues.
+ *
+ * This function should be called with the ttm lock held in exclusive mode
+ * to idle all dirty work queues before the fifo is taken down.
+ *
+ * The work task may actually requeue itself, but after the flush returns we're
+ * sure that there's nothing to present, since the ttm lock is held in
+ * exclusive mode, so the fifo will never get used.
+ */
+
+void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
+{
+	struct vmw_framebuffer_surface *entry;
+
+	mutex_lock(&vmaster->fb_surf_mutex);
+	list_for_each_entry(entry, &vmaster->fb_surf, head) {
+		if (cancel_delayed_work_sync(&entry->d_work))
+			(void) entry->d_work.work.func(&entry->d_work.work);
+
+		(void) cancel_delayed_work_sync(&entry->d_work);
+	}
+	mutex_unlock(&vmaster->fb_surf_mutex);
+}
+
 void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
-	struct vmw_framebuffer_surface *vfb =
+	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
+	struct vmw_master *vmaster = vmw_master(vfbs->master);
+
 
-	cancel_delayed_work_sync(&vfb->d_work);
+	mutex_lock(&vmaster->fb_surf_mutex);
+	list_del(&vfbs->head);
+	mutex_unlock(&vmaster->fb_surf_mutex);
+
+	cancel_delayed_work_sync(&vfbs->d_work);
+	drm_master_put(&vfbs->master);
 	drm_framebuffer_cleanup(framebuffer);
-	vmw_surface_unreference(&vfb->surface);
+	vmw_surface_unreference(&vfbs->surface);
 
-	kfree(framebuffer);
+	kfree(vfbs);
 }
 
 static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
@@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
 		SVGA3dCopyRect cr;
 	} *cmd;
 
+	/**
+	 * Strictly we should take the ttm_lock in read mode before accessing
+	 * the fifo, to make sure the fifo is present and up. However,
+	 * instead we flush all workqueues under the ttm lock in exclusive mode
+	 * before taking down the fifo.
+	 */
 	mutex_lock(&vfbs->work_lock);
 	if (!vfbs->present_fs)
 		goto out_unlock;
@@ -392,17 +435,20 @@ out_unlock:
 
 
 int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
+				  struct drm_file *file_priv,
 				  unsigned flags, unsigned color,
 				  struct drm_clip_rect *clips,
 				  unsigned num_clips)
 {
 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(framebuffer);
 	struct vmw_surface *surf = vfbs->surface;
 	struct drm_clip_rect norect;
 	SVGA3dCopyRect *cr;
 	int i, inc = 1;
+	int ret;
 
 	struct {
 		SVGA3dCmdHeader header;
@@ -410,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 		SVGA3dCopyRect cr;
 	} *cmd;
 
+	if (unlikely(vfbs->master != file_priv->master))
+		return -EINVAL;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
 	if (!num_clips ||
 	    !(dev_priv->fifo.capabilities &
 	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
@@ -425,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 			 */
 			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
 		}
+		ttm_read_unlock(&vmaster->lock);
 		return 0;
 	}
 
@@ -442,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Fifo reserve failed.\n");
+		ttm_read_unlock(&vmaster->lock);
 		return -ENOMEM;
 	}
 
@@ -461,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 	}
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
-
+	ttm_read_unlock(&vmaster->lock);
 	return 0;
 }
 
@@ -471,16 +526,57 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
 	.create_handle = vmw_framebuffer_create_handle,
 };
 
-int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
-				    struct vmw_surface *surface,
-				    struct vmw_framebuffer **out,
-				    unsigned width, unsigned height)
+static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
+					   struct drm_file *file_priv,
+					   struct vmw_surface *surface,
+					   struct vmw_framebuffer **out,
+					   const struct drm_mode_fb_cmd
+					   *mode_cmd)
 
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct vmw_framebuffer_surface *vfbs;
+	enum SVGA3dSurfaceFormat format;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
+	/*
+	 * Sanity checks.
+	 */
+
+	if (unlikely(surface->mip_levels[0] != 1 ||
+		     surface->num_sizes != 1 ||
+		     surface->sizes[0].width < mode_cmd->width ||
+		     surface->sizes[0].height < mode_cmd->height ||
+		     surface->sizes[0].depth != 1)) {
+		DRM_ERROR("Incompatible surface dimensions "
+			  "for requested mode.\n");
+		return -EINVAL;
+	}
+
+	switch (mode_cmd->depth) {
+	case 32:
+		format = SVGA3D_A8R8G8B8;
+		break;
+	case 24:
+		format = SVGA3D_X8R8G8B8;
+		break;
+	case 16:
+		format = SVGA3D_R5G6B5;
+		break;
+	case 15:
+		format = SVGA3D_A1R5G5B5;
+		break;
+	default:
+		DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+		return -EINVAL;
+	}
+
+	if (unlikely(format != surface->format)) {
+		DRM_ERROR("Invalid surface format for requested mode.\n");
+		return -EINVAL;
+	}
+
 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
 	if (!vfbs) {
 		ret = -ENOMEM;
@@ -498,16 +594,22 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	}
 
 	/* XXX get the first 3 from the surface info */
-	vfbs->base.base.bits_per_pixel = 32;
-	vfbs->base.base.pitch = width * 32 / 4;
-	vfbs->base.base.depth = 24;
-	vfbs->base.base.width = width;
-	vfbs->base.base.height = height;
+	vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
+	vfbs->base.base.pitch = mode_cmd->pitch;
+	vfbs->base.base.depth = mode_cmd->depth;
+	vfbs->base.base.width = mode_cmd->width;
+	vfbs->base.base.height = mode_cmd->height;
 	vfbs->base.pin = &vmw_surface_dmabuf_pin;
 	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
 	vfbs->surface = surface;
+	vfbs->master = drm_master_get(file_priv->master);
 	mutex_init(&vfbs->work_lock);
+
+	mutex_lock(&vmaster->fb_surf_mutex);
 	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
+	list_add_tail(&vfbs->head, &vmaster->fb_surf);
+	mutex_unlock(&vmaster->fb_surf_mutex);
+
 	*out = &vfbs->base;
 
 	return 0;
@@ -544,18 +646,25 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 }
 
 int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+				 struct drm_file *file_priv,
 				 unsigned flags, unsigned color,
 				 struct drm_clip_rect *clips,
 				 unsigned num_clips)
 {
 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct drm_clip_rect norect;
+	int ret;
 	struct {
 		uint32_t header;
 		SVGAFifoCmdUpdate body;
 	} *cmd;
 	int i, increment = 1;
 
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
 	if (!num_clips) {
 		num_clips = 1;
 		clips = &norect;
@@ -570,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Fifo reserve failed.\n");
+		ttm_read_unlock(&vmaster->lock);
 		return -ENOMEM;
 	}
 
@@ -582,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 	}
 
 	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
+	ttm_read_unlock(&vmaster->lock);
 
 	return 0;
 }
@@ -659,16 +770,25 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
 	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
 }
 
-int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
-				   struct vmw_dma_buffer *dmabuf,
-				   struct vmw_framebuffer **out,
-				   unsigned width, unsigned height)
+static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
+					  struct vmw_dma_buffer *dmabuf,
+					  struct vmw_framebuffer **out,
+					  const struct drm_mode_fb_cmd
+					  *mode_cmd)
 
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct vmw_framebuffer_dmabuf *vfbd;
+	unsigned int requested_size;
 	int ret;
 
+	requested_size = mode_cmd->height * mode_cmd->pitch;
+	if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
+		DRM_ERROR("Screen buffer object size is too small "
+			  "for requested mode.\n");
+		return -EINVAL;
+	}
+
 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
 	if (!vfbd) {
 		ret = -ENOMEM;
@@ -685,12 +805,11 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 		goto out_err3;
 	}
 
-	/* XXX get the first 3 from the surface info */
-	vfbd->base.base.bits_per_pixel = 32;
-	vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
-	vfbd->base.base.depth = 24;
-	vfbd->base.base.width = width;
-	vfbd->base.base.height = height;
+	vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
+	vfbd->base.base.pitch = mode_cmd->pitch;
+	vfbd->base.base.depth = mode_cmd->depth;
+	vfbd->base.base.width = mode_cmd->width;
+	vfbd->base.base.height = mode_cmd->height;
 	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
 	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
 	vfbd->buffer = dmabuf;
@@ -719,8 +838,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	struct vmw_framebuffer *vfb = NULL;
 	struct vmw_surface *surface = NULL;
 	struct vmw_dma_buffer *bo = NULL;
+	u64 required_size;
 	int ret;
 
+	/**
+	 * This code should be conditioned on Screen Objects not being used.
+	 * If screen objects are used, we can allocate a GMR to hold the
+	 * requested framebuffer.
+	 */
+
+	required_size = mode_cmd->pitch * mode_cmd->height;
+	if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+		DRM_ERROR("VRAM size is too small for requested mode.\n");
+		return NULL;
+	}
+
+	/**
+	 * End conditioned code.
+	 */
+
 	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
 					     mode_cmd->handle, &surface);
 	if (ret)
@@ -729,8 +865,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	if (!surface->scanout)
 		goto err_not_scanout;
 
-	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
-					      mode_cmd->width, mode_cmd->height);
+	ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
+					      &vfb, mode_cmd);
 
 	/* vmw_user_surface_lookup takes one ref so does new_fb */
 	vmw_surface_unreference(&surface);
@@ -751,7 +887,7 @@ try_dmabuf:
 	}
 
 	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-					     mode_cmd->width, mode_cmd->height);
+					     mode_cmd);
 
 	/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
 	vmw_dmabuf_unreference(&bo);
@@ -889,6 +1025,9 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
 	vmw_priv->num_displays = vmw_read(vmw_priv,
 					  SVGA_REG_NUM_GUEST_DISPLAYS);
 
+	if (vmw_priv->num_displays == 0)
+		vmw_priv->num_displays = 1;
+
 	for (i = 0; i < vmw_priv->num_displays; ++i) {
 		save = &vmw_priv->vga_save[i];
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
@@ -997,6 +1136,13 @@ out_unlock:
 	return ret;
 }
 
+bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
+				uint32_t pitch,
+				uint32_t height)
+{
+	return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
+}
+
 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
 {
 	return 0;
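
vmw_kms_validate_mode_vram() added above is the single check behind both the fbdev var validation and the connector mode pruning: a mode is acceptable only if pitch * height stays below the VRAM size. A usage sketch, illustrative only and assuming 4 bytes per pixel (the ldu code below passes hdisplay * 2 instead, i.e. a 2-bytes-per-pixel estimate):

/* Not part of the patch: reject a mode whose scanout buffer would not
 * fit in VRAM at an assumed 32 bpp. */
static bool foo_mode_fits_vram(struct vmw_private *dev_priv,
			       const struct drm_display_mode *mode)
{
	uint32_t pitch = mode->hdisplay * 4;	/* 4 bytes per pixel assumed */

	/* Example: 1280x1024 at 32 bpp needs 1280 * 4 * 1024 = 5 MiB. */
	return vmw_kms_validate_mode_vram(dev_priv, pitch, mode->vdisplay);
}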
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 11cb39e3accb..a01c47ddb5bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -427,7 +427,9 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
 {
 	struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
 	struct drm_device *dev = connector->dev;
+	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *bmode;
 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -443,22 +445,30 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
 		mode->hdisplay = ldu->pref_width;
 		mode->vdisplay = ldu->pref_height;
 		mode->vrefresh = drm_mode_vrefresh(mode);
-		drm_mode_probed_add(connector, mode);
+		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+					       mode->vdisplay)) {
+			drm_mode_probed_add(connector, mode);
 
-		if (ldu->pref_mode) {
-			list_del_init(&ldu->pref_mode->head);
-			drm_mode_destroy(dev, ldu->pref_mode);
-		}
+			if (ldu->pref_mode) {
+				list_del_init(&ldu->pref_mode->head);
+				drm_mode_destroy(dev, ldu->pref_mode);
+			}
 
-		ldu->pref_mode = mode;
+			ldu->pref_mode = mode;
+		}
 	}
 
 	for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
-		if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
-		    vmw_ldu_connector_builtin[i].vdisplay > max_height)
+		bmode = &vmw_ldu_connector_builtin[i];
+		if (bmode->hdisplay > max_width ||
+		    bmode->vdisplay > max_height)
+			continue;
+
+		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+						bmode->vdisplay))
 			continue;
 
-		mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
+		mode = drm_mode_duplicate(dev, bmode);
 		if (!mode)
 			return 0;
 		mode->vrefresh = drm_mode_vrefresh(mode);