author     Thomas Hellstrom <thellstrom@vmware.com>    2015-09-14 04:17:43 -0400
committer  Thomas Hellstrom <thellstrom@vmware.com>    2015-09-14 04:17:43 -0400
commit     2e586a7e017a502410ba1c1a7411179e1d3fbb2b (patch)
tree       44f01f5ccf6fe03bc2c081583e49b1eb5519612c
parent     54c12bc374408faddbff75dbf1a6167c19af39c4 (diff)
drm/vmwgfx: Map the fifo as cached
On the guest kernel side, the FIFO has previously been mapped write-combined.
This has worked because VMs have, up to now, not honored the mapping type and
have mapped the FIFO cached anyway. Since the FIFO is accessed cached by the
CPU on the virtual device side, this leads to inconsistent mappings once the
guest starts to honor the mapping types. So ask for cached mappings when we
map the FIFO. We do this by using ioremap_cache() instead of ioremap_wc(),
and by removing the MTRR setup. On the TTM side, MOBs, GMRs and VRAM buffers
are already requesting cached mappings for kernel- and user-space.

Cc: <stable@vger.kernel.org>
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Sinclair Yeh <syeh@vmware.com>
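The change boils down to asking for a cached mapping and dropping the
write-combine/MTRR bookkeeping. Below is a minimal, hypothetical sketch of the
before/after pattern; the struct and function names are illustrative stand-ins
for the relevant vmw_private fields, not the driver's actual code:

#include <linux/errno.h>   /* -ENOMEM */
#include <linux/io.h>      /* ioremap_cache(), ioremap_wc(), arch_phys_wc_add() */
#include <linux/types.h>   /* resource_size_t, u32, __iomem */

/* Sketch only: a trimmed-down stand-in for the driver's private struct. */
struct fifo_map_example {
        resource_size_t mmio_start;
        resource_size_t mmio_size;
        u32 __iomem *mmio_virt;
};

/* Old pattern: write-combined ioremap plus an MTRR covering the FIFO range. */
static int example_map_fifo_wc(struct fifo_map_example *p, int *mtrr)
{
        *mtrr = arch_phys_wc_add(p->mmio_start, p->mmio_size);
        p->mmio_virt = ioremap_wc(p->mmio_start, p->mmio_size);
        return p->mmio_virt ? 0 : -ENOMEM;
}

/* New pattern: a plain cached mapping; no MTRR handle left to track or free,
 * so teardown only needs iounmap(). */
static int example_map_fifo_cached(struct fifo_map_example *p)
{
        p->mmio_virt = ioremap_cache(p->mmio_start, p->mmio_size);
        return p->mmio_virt ? 0 : -ENOMEM;
}

Error handling and the surrounding driver state are omitted; the hunks below
show the actual change.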
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c  10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h   1
2 files changed, 2 insertions, 9 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b20bd9908..2c7a25c71af2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;
 
-
-	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
-					       dev_priv->mmio_size);
-
-	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
-					 dev_priv->mmio_size);
+	dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
+					    dev_priv->mmio_size);
 
 	if (unlikely(dev_priv->mmio_virt == NULL)) {
 		ret = -ENOMEM;
@@ -913,7 +909,6 @@ out_no_device:
 out_err4:
 	iounmap(dev_priv->mmio_virt);
 out_err3:
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index b60b41f95e74..a20f482848ee 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -376,7 +376,6 @@ struct vmw_private {
 	uint32_t initial_width;
 	uint32_t initial_height;
 	u32 __iomem *mmio_virt;
-	int mmio_mtrr;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;