author		Thomas Hellstrom <thellstrom@vmware.com>	2015-04-02 05:39:45 -0400
committer	Thomas Hellstrom <thellstrom@vmware.com>	2015-08-05 08:01:11 -0400
commit		b9eb1a6174e58eb8beea664ffc20d152230d8004
tree		c95ea95353e27545018d56b7b17f6f0cce2e5d48	/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
parent		f6b05004538ab0933c7527f10a2a6ed88c620f99
drm/vmwgfx: Kill a bunch of sparse warnings
We're giving up all attempts to keep cpu- and device byte ordering separate.
This silences sparse when the driver is compiled with
make C=2 CF="-D__CHECK_ENDIAN__"
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
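
For context, here is a minimal sketch (hypothetical code, not taken from the driver) of the kind of mixing that sparse flags once __CHECK_ENDIAN__ is defined: __le32 becomes a restricted (__bitwise) type, so assigning between it and a plain u32 without cpu_to_le32()/le32_to_cpu() yields "incorrect type in assignment (different base types)". The function name endian_mixup_example is made up for illustration.

#include <linux/types.h>
#include <asm/byteorder.h>

static void endian_mixup_example(void)
{
	__le32 wire;
	u32 host = 42;

	wire = host;			/* flagged by sparse under __CHECK_ENDIAN__ */
	wire = cpu_to_le32(host);	/* clean: explicit conversion */
	host = le32_to_cpu(wire);	/* clean */
	(void)host;
}

Keeping the FIFO pointers as plain u32 __iomem * sidesteps these warnings, at the cost of no longer annotating device byte order in the types, which is exactly the trade-off the message above describes.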
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 37
1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 9b8b09f8135b..7a6cf1700745 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -31,7 +31,7 @@
 
 bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t fifo_min, hwversion;
 	const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
@@ -80,7 +80,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 
 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t caps;
 
 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
@@ -95,7 +95,7 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
 
 int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
 
@@ -158,7 +158,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	static DEFINE_SPINLOCK(ping_lock);
 	unsigned long irq_flags;
 
@@ -176,7 +176,7 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
 	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
 	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
@@ -206,7 +206,7 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 
 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
 {
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -314,7 +314,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 				    uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t max;
 	uint32_t min;
 	uint32_t next_cmd;
@@ -371,7 +371,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 				if (reserveable)
 					iowrite32(bytes, fifo_mem +
 						  SVGA_FIFO_RESERVED);
-				return fifo_mem + (next_cmd >> 2);
+				return (void __force *) (fifo_mem +
+							 (next_cmd >> 2));
 			} else {
 				need_bounce = true;
 			}
@@ -414,7 +415,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 }
 
 static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
-			      __le32 __iomem *fifo_mem,
+			      u32 __iomem *fifo_mem,
 			      uint32_t next_cmd,
 			      uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -436,7 +437,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
 }
 
 static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
-			       __le32 __iomem *fifo_mem,
+			       u32 __iomem *fifo_mem,
 			       uint32_t next_cmd,
 			       uint32_t max, uint32_t min, uint32_t bytes)
 {
@@ -455,10 +456,10 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
 	}
 }
 
-void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
+static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
-	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
 	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
 	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
 	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -545,9 +546,9 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct svga_fifo_cmd_fence *cmd_fence;
-	void *fm;
+	u32 *fm;
 	int ret = 0;
-	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
+	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
 
 	fm = vmw_fifo_reserve(dev_priv, bytes);
 	if (unlikely(fm == NULL)) {
@@ -573,11 +574,9 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 		return 0;
 	}
 
-	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
-	cmd_fence = (struct svga_fifo_cmd_fence *)
-		((unsigned long)fm + sizeof(__le32));
-
-	iowrite32(*seqno, &cmd_fence->fence);
+	*fm++ = SVGA_CMD_FENCE;
+	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
+	cmd_fence->fence = *seqno;
 	vmw_fifo_commit_flush(dev_priv, bytes);
 	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
 	vmw_update_seqno(dev_priv, fifo_state);