path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
author	Chris Wilson <chris@chris-wilson.co.uk>	2011-01-23 12:24:26 -0500
committer	Chris Wilson <chris@chris-wilson.co.uk>	2011-01-24 18:45:32 -0500
commit	bdd92c9ad287e03a2ec52f5a89c470cd5caae1c2 (patch)
tree	38d863507e900fb2ccac4c22fcf8934271c051b5 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent	a37f2f87edc1b6e5932becf6e51535d36b690f2a (diff)
parent	8e934dbf264418afe4d1dff34ce074ecc14280db (diff)
Merge branch 'drm-intel-fixes' into drm-intel-next
Merge important suspend and resume regression fixes and resolve the small
conflict.

Conflicts:
	drivers/gpu/drm/i915/i915_dma.c
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c | 82
1 file changed, 66 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 94640e0e4edf..cacc89f22621 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+	if (space < 0)
+		space += ring->size;
+	return space;
+}
+
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+		ring->head = I915_READ_HEAD(ring);
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
+		ring->space = ring_space(ring);
 	}
 
 	return 0;
@@ -920,7 +926,7 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 	}
 
 	ring->tail = 0;
-	ring->space = ring->head - 8;
+	ring->space = ring_space(ring);
 
 	return 0;
 }
@@ -932,20 +938,22 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 	unsigned long end;
 	u32 head;
 
+	/* If the reported head position has wrapped or hasn't advanced,
+	 * fallback to the slow and accurate path.
+	 */
+	head = intel_read_status_page(ring, 4);
+	if (head > ring->head) {
+		ring->head = head;
+		ring->space = ring_space(ring);
+		if (ring->space >= n)
+			return 0;
+	}
+
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
 	do {
-		/* If the reported head position has wrapped or hasn't advanced,
-		 * fallback to the slow and accurate path.
-		 */
-		head = intel_read_status_page(ring, 4);
-		if (head < ring->actual_head)
-			head = I915_READ_HEAD(ring);
-		ring->actual_head = head;
-		ring->head = head & HEAD_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
+		ring->head = I915_READ_HEAD(ring);
+		ring->space = ring_space(ring);
 		if (ring->space >= n) {
 			trace_i915_ring_wait_end(dev);
 			return 0;
@@ -1290,6 +1298,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	return intel_init_ring_buffer(dev, ring);
 }
 
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+	*ring = render_ring;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
+	} else if (IS_GEN5(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->get_seqno = pc_render_get_seqno;
+	}
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
+
+	ring->size = size;
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
+	ring->map.offset = start;
+	ring->map.size = size;
+	ring->map.type = 0;
+	ring->map.flags = 0;
+	ring->map.mtrr = 0;
+
+	drm_core_ioremap_wc(&ring->map, dev);
+	if (ring->map.handle == NULL) {
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	ring->virtual_start = (void __force __iomem *)ring->map.handle;
+	return 0;
+}
+
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
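
Note: the common thread of this diff is that the ad-hoc head/tail arithmetic previously repeated in init_ring_common(), intel_wrap_ring_buffer() and intel_wait_ring_buffer() is folded into the single ring_space() helper. The stand-alone sketch below shows that free-space computation in isolation; it is not part of the patch, and the struct layout, HEAD_MASK value and example numbers are illustrative stand-ins only. The formula itself mirrors the helper added above: free space is head minus (tail + 8), wrapped modulo the ring size.

/*
 * Minimal user-space sketch of the circular-buffer free-space
 * calculation that ring_space() centralizes.  Values and names here
 * are hypothetical; only the arithmetic matches the patch.
 */
#include <stdio.h>

#define HEAD_MASK 0x001ffffc	/* stand-in for the driver's HEAD_ADDR mask */

struct demo_ring {
	unsigned int head;	/* last head position read back from hardware */
	unsigned int tail;	/* next byte the CPU will write */
	unsigned int size;	/* total ring size in bytes */
};

static int demo_ring_space(const struct demo_ring *ring)
{
	/* Keep 8 bytes of slack so tail never catches head exactly. */
	int space = (ring->head & HEAD_MASK) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}

int main(void)
{
	struct demo_ring ring = { .head = 0x100, .tail = 0xf00, .size = 0x1000 };

	/* Tail sits ahead of head, so the result wraps: prints 504 bytes. */
	printf("free space: %d bytes\n", demo_ring_space(&ring));
	return 0;
}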