Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h      | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c      | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_proc.c |  5
3 files changed, 50 insertions(+), 39 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0a4f39b9a0ec..76d9a706d8fd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -244,6 +244,10 @@ typedef struct drm_i915_private {
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
 	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives.  last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head active_list;
@@ -253,6 +257,8 @@ typedef struct drm_i915_private {
 	 * still have a write_domain which needs to be flushed before
 	 * unbinding.
 	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head flushing_list;
@@ -261,6 +267,8 @@ typedef struct drm_i915_private {
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
 	 * A reference is not held on the buffer while on this list,
 	 * as merely being GTT-bound shouldn't prevent its being
 	 * freed, and we'll pull it off the list in the free path.
@@ -394,9 +402,6 @@ struct drm_i915_gem_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
-	/** Cache domains that were flushed at the start of the request. */
-	uint32_t flush_domains;
-
 	struct list_head list;
 };
 
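Taken together, the three comment additions pin down one invariant: last_rendering_seqno is nonzero only while an object sits on the active list, and is 0 on both the flushing and inactive lists. A minimal standalone sketch of the resulting classification, using mock types; the assumption that obj_priv->active stays set on the flushing list is inferred from the BUG_ON in i915_gem_object_move_to_flushing() below, not stated in this header:

/* Illustration only -- not part of the patch.  Mock types standing in
 * for the driver's, plus a classifier built on the invariant above.
 */
#include <stdint.h>

struct mock_gem_object {
	int      active;                /* set while on active/flushing list */
	uint32_t last_rendering_seqno;  /* nonzero only while on active list */
};

enum obj_list { OBJ_ACTIVE, OBJ_FLUSHING, OBJ_INACTIVE };

static enum obj_list classify(const struct mock_gem_object *o)
{
	if (o->active && o->last_rendering_seqno != 0)
		return OBJ_ACTIVE;	/* GPU work still outstanding */
	if (o->active)
		return OBJ_FLUSHING;	/* retired, write domain unflushed */
	return OBJ_INACTIVE;		/* GTT-bound, ready to unbind */
}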
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d58ddef468f8..9fd28ebe0aa3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -532,7 +532,7 @@ i915_gem_object_free_page_list(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -546,8 +546,20 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj)
 	/* Move from whatever list we were on to the tail of execution. */
 	list_move_tail(&obj_priv->list,
 		       &dev_priv->mm.active_list);
+	obj_priv->last_rendering_seqno = seqno;
 }
 
+static void
+i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	BUG_ON(!obj_priv->active);
+	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+	obj_priv->last_rendering_seqno = 0;
+}
 
 static void
 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
@@ -562,6 +574,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	else
 		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
 
+	obj_priv->last_rendering_seqno = 0;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -610,10 +623,28 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
 
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
-	request->flush_domains = flush_domains;
 	was_empty = list_empty(&dev_priv->mm.request_list);
 	list_add_tail(&request->list, &dev_priv->mm.request_list);
 
+	/* Associate any objects on the flushing list matching the write
+	 * domain we're flushing with our flush.
+	 */
+	if (flush_domains != 0) {
+		struct drm_i915_gem_object *obj_priv, *next;
+
+		list_for_each_entry_safe(obj_priv, next,
+					 &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			if ((obj->write_domain & flush_domains) ==
+			    obj->write_domain) {
+				obj->write_domain = 0;
+				i915_gem_object_move_to_active(obj, seqno);
+			}
+		}
+
+	}
+
 	if (was_empty && !dev_priv->mm.suspended)
 		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
 	return seqno;
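The subset test in the new association loop deserves a note: an object is tied to this request only when the flush covers every domain the object is dirty in. A plain `obj->write_domain & flush_domains` would also match partial flushes and then wrongly zero the whole write_domain. A standalone sketch of the predicate (the function name is hypothetical; the patch open-codes the test):

#include <stdbool.h>
#include <stdint.h>

/* True only when the flush covers every domain the object is dirty in. */
static bool flush_covers(uint32_t obj_write_domain, uint32_t flush_domains)
{
	return (obj_write_domain & flush_domains) == obj_write_domain;
}

/* flush_covers(0x2, 0x2 | 0x4) == true  -> move object to the active list
 * flush_covers(0x2 | 0x4, 0x2) == false -> leave it on the flushing list
 */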
@@ -676,30 +707,10 @@ i915_gem_retire_request(struct drm_device *dev,
 			 __func__, request->seqno, obj);
 #endif
 
-		if (obj->write_domain != 0) {
-			list_move_tail(&obj_priv->list,
-				       &dev_priv->mm.flushing_list);
-		} else {
+		if (obj->write_domain != 0)
+			i915_gem_object_move_to_flushing(obj);
+		else
 			i915_gem_object_move_to_inactive(obj);
-		}
-	}
-
-	if (request->flush_domains != 0) {
-		struct drm_i915_gem_object *obj_priv, *next;
-
-		/* Clear the write domain and activity from any buffers
-		 * that are just waiting for a flush matching the one retired.
-		 */
-		list_for_each_entry_safe(obj_priv, next,
-					 &dev_priv->mm.flushing_list, list) {
-			struct drm_gem_object *obj = obj_priv->obj;
-
-			if (obj->write_domain & request->flush_domains) {
-				obj->write_domain = 0;
-				i915_gem_object_move_to_inactive(obj);
-			}
-		}
-
 	}
 }
 
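The net effect of this hunk plus the i915_add_request() change above: a retired-but-dirty object is now associated with the request that actually carries its flush, and later retires through the active list like any other work, instead of being swept straight to inactive when some later request with matching flush_domains retired. The lifecycle after this patch, sketched as a comment (illustrative, not code from the patch):

/* Illustrative object lifecycle after this patch:
 *
 *   execbuffer              -> move_to_active(obj, seqno)    [active_list]
 *   request retires, object
 *   still has write_domain  -> move_to_flushing(obj)         [flushing_list]
 *   covering flush emitted  -> move_to_active(obj, seqno')   [active_list]
 *   flush request retires   -> move_to_inactive(obj)         [inactive_list]
 */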
@@ -896,17 +907,15 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 	 * create a new seqno to wait for.
 	 */
 	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
-		uint32_t write_domain = obj->write_domain;
+		uint32_t seqno, write_domain = obj->write_domain;
 #if WATCH_BUF
 		DRM_INFO("%s: flushing object %p from write domain %08x\n",
 			 __func__, obj, write_domain);
 #endif
 		i915_gem_flush(dev, 0, write_domain);
 
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = i915_add_request(dev,
-								  write_domain);
-		BUG_ON(obj_priv->last_rendering_seqno == 0);
+		seqno = i915_add_request(dev, write_domain);
+		i915_gem_object_move_to_active(obj, seqno);
 #if WATCH_LRU
 		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
 #endif
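Note the ordering in the rewritten block: the request must be emitted before the object is marked active, since i915_gem_object_move_to_active() now records the seqno itself, which is also why the separate assignment and its BUG_ON could go. The calling convention distilled from this hunk (fragment only; identifiers as in the patch):

	uint32_t seqno;

	i915_gem_flush(dev, 0, write_domain);		/* queue the flush */
	seqno = i915_add_request(dev, write_domain);	/* fence it with a request */
	i915_gem_object_move_to_active(obj, seqno);	/* record the seqno on the object */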
@@ -1927,10 +1936,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	i915_file_priv->mm.last_gem_seqno = seqno;
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
-		i915_gem_object_move_to_active(obj);
-		obj_priv->last_rendering_seqno = seqno;
+		i915_gem_object_move_to_active(obj, seqno);
 #if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c
index 93de15b4c9a7..e8d5abe1250e 100644
--- a/drivers/gpu/drm/i915/i915_gem_proc.c
+++ b/drivers/gpu/drm/i915/i915_gem_proc.c
@@ -166,10 +166,9 @@ static int i915_gem_request_info(char *buf, char **start, off_t offset,
 	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
 			    list)
 	{
-		DRM_PROC_PRINT("    %d @ %d %08x\n",
+		DRM_PROC_PRINT("    %d @ %d\n",
 			       gem_request->seqno,
-			       (int) (jiffies - gem_request->emitted_jiffies),
-			       gem_request->flush_domains);
+			       (int) (jiffies - gem_request->emitted_jiffies));
 	}
 	if (len > request + offset)
 		return request;
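With flush_domains gone from struct drm_i915_gem_request, the /proc request listing drops its trailing domain mask. Hypothetical output for one request, with invented values (seqno 14, emitted 3 jiffies ago, a flush of domain 0x2):

	before:     14 @ 3 00000002
	after:      14 @ 3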