author     Thomas Hellstrom <thellstrom@vmware.com>   2015-04-02 05:39:45 -0400
committer  Thomas Hellstrom <thellstrom@vmware.com>   2015-08-05 08:01:11 -0400
commit     b9eb1a6174e58eb8beea664ffc20d152230d8004 (patch)
tree       c95ea95353e27545018d56b7b17f6f0cce2e5d48
parent     f6b05004538ab0933c7527f10a2a6ed88c620f99 (diff)
drm/vmwgfx: Kill a bunch of sparse warnings
We're giving up all attempts to keep cpu- and device byte ordering separate.

This silences sparse when compiled using
make C=2 CF="-D__CHECK_ENDIAN__"

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
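The pattern applied throughout the driver can be summarized with a small before/after sketch (illustrative only; the struct and function names below are hypothetical, not taken from the vmwgfx sources): fields and FIFO words previously typed as sparse-checked __le32/__le64 and written through cpu_to_le32()/cpu_to_le64() become plain u32/u64 assigned directly, so sparse no longer flags the assignments when run with -D__CHECK_ENDIAN__.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical illustration of the conversion -- not part of this commit. */

struct demo_fence_old {
	__le32 fence;			/* sparse-annotated device byte order */
};

struct demo_fence_new {
	u32 fence;			/* plain cpu-order word */
};

static void demo_write_old(struct demo_fence_old *c, u32 seqno)
{
	c->fence = cpu_to_le32(seqno);	/* explicit conversion required by sparse */
}

static void demo_write_new(struct demo_fence_new *c, u32 seqno)
{
	c->fence = seqno;		/* direct store, no __CHECK_ENDIAN__ warning */
}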
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h          2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c       8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c     12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c       2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c          4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h          6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c     18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c           4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c        8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c        37
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c        8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c          4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c         32
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c         10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c         14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_reg.h          6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c    17
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c         4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c      10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c      4
20 files changed, 103 insertions(+), 107 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index c9a595a78f2e..f4af9f1ef9be 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -35,7 +35,7 @@
 #include "svga_reg.h"
 
 typedef uint32 PPN;
-typedef __le64 PPN64;
+typedef u64 PPN64;
 
 /*
  * 3D Hardware Version
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index e94feb338f89..32ec52eaedd8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -695,10 +695,10 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
  * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
  * become available.
  */
-int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
+static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
                            struct drm_mm_node *node,
                            size_t size,
                            bool interruptible)
 {
         struct vmw_cmdbuf_alloc_info info;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index a8e370a55e90..2aa8bb818739 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -135,9 +135,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
                 return;
         }
 
-        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
-        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-        cmd->body.cid = cpu_to_le32(res->id);
+        cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+        cmd->header.size = sizeof(cmd->body);
+        cmd->body.cid = res->id;
 
         vmw_fifo_commit(dev_priv, sizeof(*cmd));
         vmw_fifo_resource_dec(dev_priv);
@@ -215,9 +215,9 @@ static int vmw_context_init(struct vmw_private *dev_priv,
                 return -ENOMEM;
         }
 
-        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
-        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
-        cmd->body.cid = cpu_to_le32(res->id);
+        cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+        cmd->header.size = sizeof(cmd->body);
+        cmd->body.cid = res->id;
 
         vmw_fifo_commit(dev_priv, sizeof(*cmd));
         vmw_fifo_resource_inc(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 4b9344dd6c27..9b4f0939d7bd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -225,7 +225,7 @@ int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
         if (unlikely(ret != 0))
                 return ret;
 
-        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+        ret = ttm_bo_reserve(bo, interruptible, false, false, NULL);
         if (unlikely(ret != 0))
                 goto err;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 18921444672f..ab67d2a73516 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1225,7 +1225,7 @@ static void vmw_master_drop(struct drm_device *dev,
  * @dev_priv: Pointer to device private struct.
  * Needs the reservation sem to be held in non-exclusive mode.
  */
-void __vmw_svga_enable(struct vmw_private *dev_priv)
+static void __vmw_svga_enable(struct vmw_private *dev_priv)
 {
         spin_lock(&dev_priv->svga_lock);
         if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
@@ -1254,7 +1254,7 @@ void vmw_svga_enable(struct vmw_private *dev_priv)
  * Needs the reservation sem to be held in exclusive mode.
  * Will not empty VRAM. VRAM must be emptied by caller.
  */
-void __vmw_svga_disable(struct vmw_private *dev_priv)
+static void __vmw_svga_disable(struct vmw_private *dev_priv)
 {
         spin_lock(&dev_priv->svga_lock);
         if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 9ae573640156..c9ea9b1277b0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -178,8 +178,8 @@ struct vmw_marker_queue {
 
 struct vmw_fifo_state {
         unsigned long reserved_size;
-        __le32 *dynamic_buffer;
-        __le32 *static_buffer;
+        u32 *dynamic_buffer;
+        u32 *static_buffer;
         unsigned long static_buffer_size;
         bool using_bounce_buffer;
         uint32_t capabilities;
@@ -405,7 +405,7 @@ struct vmw_private {
         uint32_t stdu_max_height;
         uint32_t initial_width;
         uint32_t initial_height;
-        __le32 __iomem *mmio_virt;
+        u32 __iomem *mmio_virt;
         int mmio_mtrr;
         uint32_t capabilities;
         uint32_t max_gmr_ids;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 64dba53ca54c..40fdd0258664 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1850,7 +1850,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
         uint32_t size_remaining = *size;
         uint32_t cmd_id;
 
-        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+        cmd_id = ((uint32_t *)buf)[0];
         switch (cmd_id) {
         case SVGA_CMD_UPDATE:
                 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
@@ -2066,14 +2066,14 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
         const struct vmw_cmd_entry *entry;
         bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
 
-        cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+        cmd_id = ((uint32_t *)buf)[0];
         /* Handle any none 3D commands */
         if (unlikely(cmd_id < SVGA_CMD_MAX))
                 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
 
 
-        cmd_id = le32_to_cpu(header->id);
-        *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
+        cmd_id = header->id;
+        *size = header->size + sizeof(SVGA3dCmdHeader);
 
         cmd_id -= SVGA_3D_CMD_BASE;
         if (unlikely(*size > size_remaining))
@@ -2499,11 +2499,11 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
  * If the function is interrupted by a signal while sleeping, it will return
  * -ERESTARTSYS casted to a pointer error value.
  */
-void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
+static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
                          void __user *user_commands,
                          void *kernel_commands,
                          u32 command_size,
                          struct vmw_cmdbuf_header **header)
 {
         size_t cmdbuf_size;
         int ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 9dbb2031a017..9856803e7aba 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -331,7 +331,7 @@ static void vmw_deferred_io(struct fb_info *info,
         vmw_fb_dirty_flush(par);
 };
 
-struct fb_deferred_io vmw_defio = {
+static struct fb_deferred_io vmw_defio = {
         .delay = VMW_DIRTY_DELAY,
         .deferred_io = vmw_deferred_io,
 };
@@ -706,7 +706,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
         info->fix.smem_len = fb_size;
 
         info->pseudo_palette = par->pseudo_palette;
-        info->screen_base = par->vmalloc;
+        info->screen_base = (char __iomem *)par->vmalloc;
         info->screen_size = fb_size;
 
         info->flags = FBINFO_DEFAULT;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 945f1e0dad92..75d6222b510a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -142,7 +142,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
         struct vmw_fence_manager *fman = fman_from_fence(fence);
         struct vmw_private *dev_priv = fman->dev_priv;
 
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
         u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
         if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
                 return false;
@@ -386,7 +386,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
                                       u32 passed_seqno)
 {
         u32 goal_seqno;
-        __le32 __iomem *fifo_mem;
+        u32 __iomem *fifo_mem;
         struct vmw_fence_obj *fence;
 
         if (likely(!fman->seqno_valid))
@@ -430,7 +430,7 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
 {
         struct vmw_fence_manager *fman = fman_from_fence(fence);
         u32 goal_seqno;
-        __le32 __iomem *fifo_mem;
+        u32 __iomem *fifo_mem;
 
         if (fence_is_signaled_locked(&fence->base))
                 return false;
@@ -453,7 +453,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
         struct list_head action_list;
         bool needs_rerun;
         uint32_t seqno, new_seqno;
-        __le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;
 
         seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 rerun:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 9b8b09f8135b..7a6cf1700745 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -31,7 +31,7 @@
 
 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
         uint32_t fifo_min, hwversion;
         const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
@@ -80,7 +80,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 
 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 {
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
         uint32_t caps;
 
         if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,7 +95,7 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
         uint32_t max;
         uint32_t min;
 
@@ -158,7 +158,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
         static DEFINE_SPINLOCK(ping_lock);
         unsigned long irq_flags;
 
@@ -176,7 +176,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
         vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
         while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -206,7 +206,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 {
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
         uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
         uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
         uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -314,7 +314,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                                     uint32_t bytes)
 {
         struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
         uint32_t max;
         uint32_t min;
         uint32_t next_cmd;
@@ -371,7 +371,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
                 if (reserveable)
                         iowrite32(bytes, fifo_mem +
                                   SVGA_FIFO_RESERVED);
-                return fifo_mem + (next_cmd >> 2);
+                return (void __force *) (fifo_mem +
+                                         (next_cmd >> 2));
         } else {
                 need_bounce = true;
         }
@@ -414,7 +415,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 }
 
 static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
-                              __le32 __iomem *fifo_mem,
+                              u32 __iomem *fifo_mem,
                               uint32_t next_cmd,
                               uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -436,7 +437,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
 }
 
 static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
-                               __le32 __iomem *fifo_mem,
+                               u32 __iomem *fifo_mem,
                                uint32_t next_cmd,
                                uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -455,10 +456,10 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
         }
 }
 
-void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
         struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
         uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
         uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
         uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -545,9 +546,9 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
         struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
         struct svga_fifo_cmd_fence *cmd_fence;
-        void *fm;
+        u32 *fm;
         int ret = 0;
-        uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+        uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
 
         fm = vmw_fifo_reserve(dev_priv, bytes);
         if (unlikely(fm == NULL)) {
@@ -573,11 +574,9 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
                 return 0;
         }
 
-        *(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
-        cmd_fence = (struct svga_fifo_cmd_fence *)
-                ((unsigned long)fm + sizeof(__le32));
-
-        iowrite32(*seqno, &cmd_fence->fence);
+        *fm++ = SVGA_CMD_FENCE;
+        cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+        cmd_fence->fence = *seqno;
         vmw_fifo_commit_flush(dev_priv, bytes);
         (void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
         vmw_update_seqno(dev_priv, fifo_state);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 55940bc0eb07..6db98289b8a4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -63,7 +63,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                 break;
         case DRM_VMW_PARAM_FIFO_HW_VERSION:
         {
-                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+                u32 __iomem *fifo_mem = dev_priv->mmio_virt;
                 const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
                 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
@@ -158,7 +158,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                 (struct drm_vmw_get_3d_cap_arg *) data;
         struct vmw_private *dev_priv = vmw_priv(dev);
         uint32_t size;
-        __le32 __iomem *fifo_mem;
+        u32 __iomem *fifo_mem;
         void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
         void *bounce;
         int ret;
@@ -239,7 +239,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
         int ret;
 
         num_clips = arg->num_clips;
-        clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+        clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
         if (unlikely(num_clips == 0))
                 return 0;
@@ -322,7 +322,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
         int ret;
 
         num_clips = arg->num_clips;
-        clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+        clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
 
         if (unlikely(num_clips == 0))
                 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 87964bb0704e..2c2bac4a0fd6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -72,7 +72,7 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 void vmw_update_seqno(struct vmw_private *dev_priv,
                          struct vmw_fifo_state *fifo_state)
 {
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
         uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
 
         if (dev_priv->last_read_seqno != seqno) {
@@ -178,7 +178,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
         }
         finish_wait(&dev_priv->fence_queue, &__wait);
         if (ret == 0 && fifo_idle) {
-                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+                u32 __iomem *fifo_mem = dev_priv->mmio_virt;
                 iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
         }
         wake_up_all(&dev_priv->fence_queue);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ef605b66458f..ca69ed4a3926 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -71,12 +71,12 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
 
         memcpy(&cmd[1], image, image_size);
 
-        cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
-        cmd->cursor.id = cpu_to_le32(0);
-        cmd->cursor.width = cpu_to_le32(width);
-        cmd->cursor.height = cpu_to_le32(height);
-        cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
-        cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+        cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+        cmd->cursor.id = 0;
+        cmd->cursor.width = width;
+        cmd->cursor.height = height;
+        cmd->cursor.hotspotX = hotspotX;
+        cmd->cursor.hotspotY = hotspotY;
 
         vmw_fifo_commit(dev_priv, cmd_size);
 
@@ -123,7 +123,7 @@ err_unreserve:
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                 bool show, int x, int y)
 {
-        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+        u32 __iomem *fifo_mem = dev_priv->mmio_virt;
         uint32_t count;
 
         iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
@@ -1017,14 +1017,14 @@ static const struct drm_mode_config_funcs vmw_kms_funcs = {
         .fb_create = vmw_kms_fb_create,
 };
 
-int vmw_kms_generic_present(struct vmw_private *dev_priv,
+static int vmw_kms_generic_present(struct vmw_private *dev_priv,
                             struct drm_file *file_priv,
                             struct vmw_framebuffer *vfb,
                             struct vmw_surface *surface,
                             uint32_t sid,
                             int32_t destX, int32_t destY,
                             struct drm_vmw_rect *clips,
                             uint32_t num_clips)
 {
         return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
                                             &surface->res, destX, destY,
@@ -1785,7 +1785,7 @@ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
         struct ttm_buffer_object *bo = &buf->base;
         int ret;
 
-        ttm_bo_reserve(bo, false, false, interruptible, 0);
+        ttm_bo_reserve(bo, false, false, interruptible, NULL);
         ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
                                          validate_as_mob);
         if (ret)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 51721c37d15b..55038457a096 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -476,11 +476,11 @@ int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
 
         memset(cmd, 0, fifo_size);
         for (i = 0; i < num_clips; i++, clips += increment) {
-                cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
-                cmd[i].body.x = cpu_to_le32(clips->x1);
-                cmd[i].body.y = cpu_to_le32(clips->y1);
-                cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
-                cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
+                cmd[i].header = SVGA_CMD_UPDATE;
+                cmd[i].body.x = clips->x1;
+                cmd[i].body.y = clips->y1;
+                cmd[i].body.width = clips->x2 - clips->x1;
+                cmd[i].body.height = clips->y2 - clips->y1;
         }
 
         vmw_fifo_commit(dev_priv, fifo_size);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index e0fc2485ddb1..c5897cb4e4d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -142,7 +142,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
         cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
         cmd->header.size = sizeof(cmd->body);
         cmd->body.type = type;
-        cmd->body.baseAddress = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+        cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
         cmd->body.sizeInBytes = otable->size;
         cmd->body.validSizeInBytes = 0;
         cmd->body.ptDepth = mob->pt_level;
@@ -430,15 +430,15 @@ out_unreserve:
  * *@addr according to the page table entry size.
  */
 #if (VMW_PPN_SIZE == 8)
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
 {
-        *((__le64 *) *addr) = cpu_to_le64(val >> PAGE_SHIFT);
+        *((u64 *) *addr) = val >> PAGE_SHIFT;
         *addr += 2;
 }
 #else
-static void vmw_mob_assign_ppn(__le32 **addr, dma_addr_t val)
+static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
 {
-        *(*addr)++ = cpu_to_le32(val >> PAGE_SHIFT);
+        *(*addr)++ = val >> PAGE_SHIFT;
 }
 #endif
 
@@ -460,7 +460,7 @@ static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
         unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
         unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
         unsigned long pt_page;
-        __le32 *addr, *save_addr;
+        u32 *addr, *save_addr;
         unsigned long i;
         struct page *page;
 
@@ -641,7 +641,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
         cmd->header.size = sizeof(cmd->body);
         cmd->body.mobid = mob_id;
         cmd->body.ptDepth = mob->pt_level;
-        cmd->body.base = cpu_to_le64(mob->pt_root_page >> PAGE_SHIFT);
+        cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
         cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;
 
         vmw_fifo_commit(dev_priv, sizeof(*cmd));
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index 9d0dd3a342eb..29d06a4cf024 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -39,12 +39,12 @@
 #define VMWGFX_IRQSTATUS_PORT 0x8
 
 struct svga_guest_mem_descriptor {
-        __le32 ppn;
-        __le32 num_pages;
+        u32 ppn;
+        u32 num_pages;
 };
 
 struct svga_fifo_cmd_fence {
-        __le32 fence;
+        u32 fence;
 };
 
 #define SVGA_SYNC_GENERIC 1
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 69b471af0130..be2809aaa7cb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -121,6 +121,7 @@ static void vmw_resource_release(struct kref *kref)
         int id;
         struct idr *idr = &dev_priv->res_idr[res->func->res_type];
 
+        write_lock(&dev_priv->resource_lock);
         res->avail = false;
         list_del_init(&res->lru_head);
         write_unlock(&dev_priv->resource_lock);
@@ -156,20 +157,17 @@ static void vmw_resource_release(struct kref *kref)
         kfree(res);
 
         write_lock(&dev_priv->resource_lock);
-
         if (id != -1)
                 idr_remove(idr, id);
+        write_unlock(&dev_priv->resource_lock);
 }
 
 void vmw_resource_unreference(struct vmw_resource **p_res)
 {
         struct vmw_resource *res = *p_res;
-        struct vmw_private *dev_priv = res->dev_priv;
 
         *p_res = NULL;
-        write_lock(&dev_priv->resource_lock);
         kref_put(&res->kref, vmw_resource_release);
-        write_unlock(&dev_priv->resource_lock);
 }
 
 
@@ -260,17 +258,16 @@ void vmw_resource_activate(struct vmw_resource *res,
         write_unlock(&dev_priv->resource_lock);
 }
 
-struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
+static struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                          struct idr *idr, int id)
 {
         struct vmw_resource *res;
 
         read_lock(&dev_priv->resource_lock);
         res = idr_find(idr, id);
-        if (res && res->avail)
-                kref_get(&res->kref);
-        else
+        if (!res || !res->avail || !kref_get_unless_zero(&res->kref))
                 res = NULL;
+
         read_unlock(&dev_priv->resource_lock);
 
         if (unlikely(res == NULL))
@@ -1306,7 +1303,7 @@ vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
  * @res: The resource to evict.
  * @interruptible: Whether to wait interruptible.
  */
-int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+static int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
         struct ttm_validate_buffer val_buf;
         const struct vmw_res_func *func = res->func;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 8b5bc170d5aa..2af3fa1b1904 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -450,8 +450,8 @@ static bool vmw_sou_screen_object_flippable(struct vmw_private *dev_priv,
  * Update the implicit fb to the current fb of this crtc.
  * Must be called with the mode_config mutex held.
  */
-void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
+static void vmw_sou_update_implicit_fb(struct vmw_private *dev_priv,
                                 struct drm_crtc *crtc)
 {
         struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 6110a433ebfe..11bc60c2771a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -407,11 +407,11 @@ out:
 }
 
 
-struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
+static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
                                       struct vmw_dma_buffer *buffer,
                                       size_t shader_size,
                                       size_t offset,
                                       SVGA3dShaderType shader_type)
 {
         struct vmw_shader *shader;
         struct vmw_resource *res;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 835f3431574f..843d7e04b376 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -220,7 +220,7 @@ static void vmw_surface_define_encode(const struct vmw_surface *srf,
         cmd->header.size = cmd_len;
         cmd->body.sid = srf->res.id;
         cmd->body.surfaceFlags = srf->flags;
-        cmd->body.format = cpu_to_le32(srf->format);
+        cmd->body.format = srf->format;
         for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
 
@@ -1054,7 +1054,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
         cmd->header.size = cmd_len;
         cmd->body.sid = srf->res.id;
         cmd->body.surfaceFlags = srf->flags;
-        cmd->body.format = cpu_to_le32(srf->format);
+        cmd->body.format = srf->format;
         cmd->body.numMipLevels = srf->mip_levels[0];
         cmd->body.multisampleCount = srf->multisample_count;
         cmd->body.autogenFilter = srf->autogen_filter;