author    Carl Worth <cworth@cworth.org>    2009-03-20 14:54:25 -0400
committer Eric Anholt <eric@anholt.net>    2009-04-01 18:22:07 -0400
commit    5e118f4139feafe97e913df67b1f7c1e5083e535
tree      a4d73fb1bb51083ab95b6167c6a8c621f6245a63
parent    7026d4ac1fc134566c2c946e6c0d849fc03ba7b7
drm/i915: Add a spinlock to protect the active_list
This is a baby step toward finer-grained locking than the struct_mutex.
Specifically, this will enable new debugging code to read the active list
for printing out GPU state when the GPU is wedged (while the struct_mutex
is held, of course).

Signed-off-by: Carl Worth <cworth@cworth.org>
[anholt: indentation fix]
Signed-off-by: Eric Anholt <eric@anholt.net>
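As a sketch of the pattern this lock enables (hypothetical, not part of the
patch: the function name below is invented, though the lock, list, and
fields all come from this change), debugging code could walk the active
list like so:

/* Hypothetical consumer of the new lock: dump the active list.  Usable
 * from debugging paths even while another thread holds the struct_mutex,
 * since the spinlock alone protects the list walk.
 */
static void i915_gem_dump_active_list(drm_i915_private_t *dev_priv)
{
	struct drm_i915_gem_object *obj_priv;

	spin_lock(&dev_priv->mm.active_list_lock);
	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list)
		DRM_INFO("active %p: seqno %08x\n", obj_priv,
			 obj_priv->last_rendering_seqno);
	spin_unlock(&dev_priv->mm.active_list_lock);
}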
Diffstat (limited to 'drivers/gpu')
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	1
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	24
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_debug.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_debugfs.c	6
4 files changed, 29 insertions, 4 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index c0f48bb366bf..317b1223e091 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -300,6 +300,7 @@ typedef struct drm_i915_private {
 	 *
 	 * A reference is held on the buffer while on this list.
 	 */
+	spinlock_t active_list_lock;
 	struct list_head active_list;
 
 	/**
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9f4eceb8093d..1449b452cc63 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1325,8 +1325,10 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 		obj_priv->active = 1;
 	}
 	/* Move from whatever list we were on to the tail of execution. */
+	spin_lock(&dev_priv->mm.active_list_lock);
 	list_move_tail(&obj_priv->list,
 		       &dev_priv->mm.active_list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
 	obj_priv->last_rendering_seqno = seqno;
 }
 
@@ -1468,6 +1470,7 @@ i915_gem_retire_request(struct drm_device *dev,
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
+	spin_lock(&dev_priv->mm.active_list_lock);
 	while (!list_empty(&dev_priv->mm.active_list)) {
 		struct drm_gem_object *obj;
 		struct drm_i915_gem_object *obj_priv;
@@ -1482,7 +1485,7 @@ i915_gem_retire_request(struct drm_device *dev,
 		 * this seqno.
 		 */
 		if (obj_priv->last_rendering_seqno != request->seqno)
-			return;
+			goto out;
 
 #if WATCH_LRU
 		DRM_INFO("%s: retire %d moves to inactive list %p\n",
@@ -1494,6 +1497,8 @@ i915_gem_retire_request(struct drm_device *dev,
 		else
 			i915_gem_object_move_to_inactive(obj);
 	}
+out:
+	spin_unlock(&dev_priv->mm.active_list_lock);
 }
 
 /**
@@ -2215,15 +2220,20 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 		}
 	}
 	if (obj_priv->gtt_space == NULL) {
+		bool lists_empty;
+
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
 		 */
 #if WATCH_LRU
 		DRM_INFO("%s: GTT full, evicting something\n", __func__);
 #endif
-		if (list_empty(&dev_priv->mm.inactive_list) &&
-		    list_empty(&dev_priv->mm.flushing_list) &&
-		    list_empty(&dev_priv->mm.active_list)) {
+		spin_lock(&dev_priv->mm.active_list_lock);
+		lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+			       list_empty(&dev_priv->mm.flushing_list) &&
+			       list_empty(&dev_priv->mm.active_list));
+		spin_unlock(&dev_priv->mm.active_list_lock);
+		if (lists_empty) {
 			DRM_ERROR("GTT full, but LRU list empty\n");
 			return -ENOMEM;
 		}
@@ -3679,6 +3689,7 @@ i915_gem_idle(struct drm_device *dev)
 
 	i915_gem_retire_requests(dev);
 
+	spin_lock(&dev_priv->mm.active_list_lock);
 	if (!dev_priv->mm.wedged) {
 		/* Active and flushing should now be empty as we've
 		 * waited for a sequence higher than any pending execbuffer
@@ -3705,6 +3716,7 @@ i915_gem_idle(struct drm_device *dev)
 		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
 		i915_gem_object_move_to_inactive(obj_priv->obj);
 	}
+	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	while (!list_empty(&dev_priv->mm.flushing_list)) {
 		struct drm_i915_gem_object *obj_priv;
@@ -3953,7 +3965,10 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	if (ret != 0)
 		return ret;
 
+	spin_lock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	BUG_ON(!list_empty(&dev_priv->mm.request_list));
@@ -3997,6 +4012,7 @@ i915_gem_load(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 131c088f8c8a..8d0b943e2c5a 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -105,12 +105,14 @@ i915_dump_lru(struct drm_device *dev, const char *where)
 	struct drm_i915_gem_object *obj_priv;
 
 	DRM_INFO("active list %s {\n", where);
+	spin_lock(&dev_priv->mm.active_list_lock);
 	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
 			    list)
 	{
 		DRM_INFO("    %p: %08x\n", obj_priv,
 			 obj_priv->last_rendering_seqno);
 	}
+	spin_unlock(&dev_priv->mm.active_list_lock);
 	DRM_INFO("}\n");
 	DRM_INFO("flushing list %s {\n", where);
 	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index 455ec970b385..a1ac0c5e7307 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -69,10 +69,13 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv;
+	spinlock_t *lock = NULL;
 
 	switch (list) {
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
+		lock = &dev_priv->mm.active_list_lock;
+		spin_lock(lock);
 		head = &dev_priv->mm.active_list;
 		break;
 	case INACTIVE_LIST:
@@ -104,6 +107,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 			seq_printf(m, " (fence: %d\n", obj_priv->fence_reg);
 		seq_printf(m, "\n");
 	}
+
+	if (lock)
+		spin_unlock(lock);
 	return 0;
 }
 