Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  109
1 file changed, 82 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f6b9baa6a63d..445f27efe677 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,14 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+static inline int ring_space(struct intel_ring_buffer *ring)
+{
+	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+	if (space < 0)
+		space += ring->size;
+	return space;
+}
+
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
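
The new ring_space() helper centralises free-space accounting that was previously open-coded at each call site: the writable gap runs from tail up to head, minus 8 reserved bytes so the tail can never catch the head (head == tail must unambiguously mean "empty", not "full"), wrapping by the ring size when the subtraction goes negative. A minimal standalone sketch of the same arithmetic with illustrative values; the struct and main() below are stand-ins for the driver's types, and the HEAD_ADDR masking is omitted:

#include <stdio.h>

struct ring {
	int head;	/* byte offset the GPU consumes from */
	int tail;	/* byte offset the CPU emits to */
	int size;	/* total ring size in bytes */
};

static int ring_space(const struct ring *ring)
{
	/* Gap from tail up to head, keeping 8 bytes in reserve so that
	 * head == tail always reads as "empty". */
	int space = ring->head - (ring->tail + 8);
	if (space < 0)
		space += ring->size;	/* tail has wrapped past head */
	return space;
}

int main(void)
{
	struct ring r = { .head = 256, .tail = 4000, .size = 4096 };
	/* 256 - 4008 = -3752; -3752 + 4096 = 344 bytes still free. */
	printf("space = %d\n", ring_space(&r));
	return 0;
}
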
@@ -204,11 +212,9 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
 		i915_kernel_lost_context(ring->dev);
 	else {
-		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+		ring->head = I915_READ_HEAD(ring);
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
+		ring->space = ring_space(ring);
 	}
 
 	return 0;
@@ -921,32 +927,34 @@ static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
 	}
 
 	ring->tail = 0;
-	ring->space = ring->head - 8;
+	ring->space = ring_space(ring);
 
 	return 0;
 }
 
 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 {
-	int reread = 0;
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long end;
 	u32 head;
 
+	/* If the reported head position has wrapped or hasn't advanced,
+	 * fallback to the slow and accurate path.
+	 */
+	head = intel_read_status_page(ring, 4);
+	if (head > ring->head) {
+		ring->head = head;
+		ring->space = ring_space(ring);
+		if (ring->space >= n)
+			return 0;
+	}
+
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
 	do {
-		/* If the reported head position has wrapped or hasn't advanced,
-		 * fallback to the slow and accurate path.
-		 */
-		head = intel_read_status_page(ring, 4);
-		if (reread)
-			head = I915_READ_HEAD(ring);
-		ring->head = head & HEAD_ADDR;
-		ring->space = ring->head - (ring->tail + 8);
-		if (ring->space < 0)
-			ring->space += ring->size;
+		ring->head = I915_READ_HEAD(ring);
+		ring->space = ring_space(ring);
 		if (ring->space >= n) {
 			trace_i915_ring_wait_end(dev);
 			return 0;
@@ -961,7 +969,6 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
 		msleep(1);
 		if (atomic_read(&dev_priv->mm.wedged))
 			return -EAGAIN;
-		reread = 1;
 	} while (!time_after(jiffies, end));
 	trace_i915_ring_wait_end (dev);
 	return -EBUSY;
@@ -1052,22 +1059,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 }
 
 static int gen6_ring_flush(struct intel_ring_buffer *ring,
-			   u32 invalidate_domains,
-			   u32 flush_domains)
+			   u32 invalidate, u32 flush)
 {
+	uint32_t cmd;
 	int ret;
 
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+	if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_FLUSH_DW);
-	intel_ring_emit(ring, 0);
+	cmd = MI_FLUSH_DW;
+	if (invalidate & I915_GEM_GPU_DOMAINS)
+		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
+	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
 }
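
Both flush hooks now compose the MI_FLUSH_DW command word before emitting it, ORing in TLB-invalidation bits only when the caller requested a GPU-domain invalidate; the packet stays four dwords, with the trailing dword now an explicit MI_NOOP. The blt_ring_flush() hunk below follows the same shape but keys off I915_GEM_DOMAIN_RENDER and sets only MI_INVALIDATE_TLB. A sketch of the compose-then-emit idiom; the bit encodings here are placeholders, not the real hardware values:

#include <stdint.h>
#include <stdio.h>

#define MI_FLUSH_DW		(0x26u << 23)	/* placeholder encoding */
#define MI_INVALIDATE_TLB	(1u << 18)	/* placeholder bit */
#define MI_INVALIDATE_BSD	(1u << 7)	/* placeholder bit */
#define GPU_DOMAINS		0x3eu		/* placeholder domain mask */

static uint32_t build_flush_cmd(uint32_t invalidate)
{
	uint32_t cmd = MI_FLUSH_DW;
	if (invalidate & GPU_DOMAINS)	/* invalidate only on request */
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	return cmd;
}

int main(void)
{
	printf("flush only:      0x%08x\n", (unsigned)build_flush_cmd(0));
	printf("with invalidate: 0x%08x\n", (unsigned)build_flush_cmd(GPU_DOMAINS));
	return 0;
}
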
@@ -1223,22 +1233,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
 }
 
 static int blt_ring_flush(struct intel_ring_buffer *ring,
-			  u32 invalidate_domains,
-			  u32 flush_domains)
+			  u32 invalidate, u32 flush)
 {
+	uint32_t cmd;
 	int ret;
 
-	if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+	if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
 		return 0;
 
 	ret = blt_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, MI_FLUSH_DW);
-	intel_ring_emit(ring, 0);
+	cmd = MI_FLUSH_DW;
+	if (invalidate & I915_GEM_DOMAIN_RENDER)
+		cmd |= MI_INVALIDATE_TLB;
+	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 	return 0;
 }
@@ -1292,6 +1305,48 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	return intel_init_ring_buffer(dev, ring);
 }
 
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+	*ring = render_ring;
+	if (INTEL_INFO(dev)->gen >= 6) {
+		ring->add_request = gen6_add_request;
+		ring->irq_get = gen6_render_ring_get_irq;
+		ring->irq_put = gen6_render_ring_put_irq;
+	} else if (IS_GEN5(dev)) {
+		ring->add_request = pc_render_add_request;
+		ring->get_seqno = pc_render_get_seqno;
+	}
+
+	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
+
+	ring->size = size;
+	ring->effective_size = ring->size;
+	if (IS_I830(ring->dev))
+		ring->effective_size -= 128;
+
+	ring->map.offset = start;
+	ring->map.size = size;
+	ring->map.type = 0;
+	ring->map.flags = 0;
+	ring->map.mtrr = 0;
+
+	drm_core_ioremap_wc(&ring->map, dev);
+	if (ring->map.handle == NULL) {
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	ring->virtual_start = (void __force __iomem *)ring->map.handle;
+	return 0;
+}
+
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
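
intel_render_ring_init_dri() serves the legacy DRI path: rather than letting intel_init_ring_buffer() allocate the ring object, it copies the static render_ring template, patches the per-generation hooks (gen6 request/irq functions, gen5 pipe-control variants), and maps the caller-supplied (start, size) range write-combined; the 128-byte effective_size trim on i830 appears to mirror the same workaround in intel_init_ring_buffer(). A miniature of the copy-a-template-then-patch idiom; all names in the sketch are simplified stand-ins:

#include <stdio.h>

struct ring_ops {
	const char *name;
	int (*add_request)(void);	/* per-generation hook */
};

static int legacy_add_request(void) { return 1; }
static int gen6_add_request_stub(void) { return 6; }

/* Static template, analogous to the driver's render_ring that the
 * init function copies with "*ring = render_ring;". */
static const struct ring_ops render_template = {
	.name = "render ring",
	.add_request = legacy_add_request,
};

static void ring_init(struct ring_ops *ring, int gen)
{
	*ring = render_template;	/* start from the template... */
	if (gen >= 6)			/* ...then patch newer-gen hooks */
		ring->add_request = gen6_add_request_stub;
}

int main(void)
{
	struct ring_ops r;
	ring_init(&r, 6);
	printf("%s: add_request -> %d\n", r.name, r.add_request());
	return 0;
}
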