author		Daniel Vetter <daniel.vetter@ffwll.ch>	2012-04-26 17:28:16 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2012-05-03 05:18:31 -0400
commit		4225d0f219d22440e33a5686bf806356cb25bcf5 (patch)
tree		aa0447ef7f0a739000da6bdb241de42a70ae9205 /drivers/gpu
parent		316d388450be37fedcf4b37cf211b2bdc7826bb8 (diff)
drm/i915: fixup __iomem mixups in ringbuffer.c
Two things:

- ring->virtual_start is an __iomem pointer, treat it accordingly.
- dev_priv->status_page.page_addr is now always a cpu addr, no pointer
  casting needed for that.

Take the opportunity to remove the unnecessary drm indirection when
setting up the ringbuffer iomapping.

v2: Add a compiler barrier before reading the hw status page.

Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
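As background, here is a minimal, hypothetical sketch of the two access patterns the patch separates (the example_* names and signatures are made up for illustration, not the driver's actual code): ring memory obtained from ioremap_wc() is an __iomem pointer and should go through the io accessors such as iowrite32(), while the hardware status page is a plain CPU address that can be dereferenced directly, with barrier() keeping the compiler from caching or eliding the load.

	/* Illustrative fragment only -- not the actual i915 code. */
	#include <linux/io.h>
	#include <linux/compiler.h>

	/* Ring memory sits behind an ioremap_wc() mapping: use the io accessors. */
	static void example_emit_dword(u32 __iomem *ring_virt, unsigned int dword, u32 val)
	{
		iowrite32(val, ring_virt + dword);
	}

	/*
	 * The hw status page is a normal CPU address: a plain load is fine,
	 * but barrier() stops the compiler from reusing a stale value.
	 */
	static u32 example_read_status(u32 *status_page, int reg)
	{
		barrier();
		return status_page[reg];
	}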
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c         |  2
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c | 39
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h |  7
3 files changed, 17 insertions, 31 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f0c0a7ed90e5..b5a1a72d3325 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -256,7 +256,7 @@ static int i915_dma_resume(struct drm_device * dev)
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 
-	if (ring->map.handle == NULL) {
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index df3a770d60fa..38096080a3de 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -977,20 +977,14 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	if (ret)
 		goto err_unref;
 
-	ring->map.size = ring->size;
-	ring->map.offset = dev->agp->base + obj->gtt_offset;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.handle == NULL) {
+	ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
+					 ring->size);
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
 		ret = -EINVAL;
 		goto err_unpin;
 	}
 
-	ring->virtual_start = ring->map.handle;
 	ret = ring->init(ring);
 	if (ret)
 		goto err_unmap;
@@ -1006,7 +1000,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	return 0;
 
 err_unmap:
-	drm_core_ioremapfree(&ring->map, dev);
+	iounmap(ring->virtual_start);
 err_unpin:
 	i915_gem_object_unpin(obj);
 err_unref:
@@ -1034,7 +1028,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 	I915_WRITE_CTL(ring, 0);
 
-	drm_core_ioremapfree(&ring->map, ring->dev);
+	iounmap(ring->virtual_start);
 
 	i915_gem_object_unpin(ring->obj);
 	drm_gem_object_unreference(&ring->obj->base);
@@ -1048,7 +1042,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 {
-	unsigned int *virt;
+	uint32_t __iomem *virt;
 	int rem = ring->size - ring->tail;
 
 	if (ring->space < rem) {
@@ -1057,12 +1051,10 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 			return ret;
 	}
 
-	virt = (unsigned int *)(ring->virtual_start + ring->tail);
-	rem /= 8;
-	while (rem--) {
-		*virt++ = MI_NOOP;
-		*virt++ = MI_NOOP;
-	}
+	virt = ring->virtual_start + ring->tail;
+	rem /= 4;
+	while (rem--)
+		iowrite32(MI_NOOP, virt++);
 
 	ring->tail = 0;
 	ring->space = ring_space(ring);
@@ -1427,20 +1419,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 	if (IS_I830(ring->dev))
 		ring->effective_size -= 128;
 
-	ring->map.offset = start;
-	ring->map.size = size;
-	ring->map.type = 0;
-	ring->map.flags = 0;
-	ring->map.mtrr = 0;
-
-	drm_core_ioremap_wc(&ring->map, dev);
-	if (ring->map.handle == NULL) {
+	ring->virtual_start = ioremap_wc(start, size);
+	if (ring->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
 		return -ENOMEM;
 	}
 
-	ring->virtual_start = (void __force __iomem *)ring->map.handle;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 7b879926969e..baba75714578 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -2,7 +2,7 @@
 #define _INTEL_RINGBUFFER_H_
 
 struct intel_hw_status_page {
-	u32 __iomem *page_addr;
+	u32 *page_addr;
 	unsigned int gfx_addr;
 	struct drm_i915_gem_object *obj;
 };
@@ -115,7 +115,6 @@ struct intel_ring_buffer {
 	u32 outstanding_lazy_request;
 
 	wait_queue_head_t irq_queue;
-	drm_local_map_t map;
 
 	void *private;
 };
@@ -149,7 +148,9 @@ static inline u32
 intel_read_status_page(struct intel_ring_buffer *ring,
 		       int reg)
 {
-	return ioread32(ring->status_page.page_addr + reg);
+	/* Ensure that the compiler doesn't optimize away the load. */
+	barrier();
+	return ring->status_page.page_addr[reg];
 }
 
 /**