aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDave Airlie <airlied@linux.ie>2009-09-14 21:12:56 -0400
committerDave Airlie <airlied@linux.ie>2009-09-15 19:15:39 -0400
commitecb114a128d150422d22eda238cb812f6b20bf39 (patch)
tree6cd09599363f9760fed8a210c576c984cbbeec18 /drivers
parent42dea5ddb56fe10e1d9a7840ddcb1df97a208a99 (diff)
drm/radeon/kms: IB locking dumps out a lockdep ordering issue
We sometimes lock the IB then the ring, and sometimes the ring then the IB. This is mostly due to the IB locking not being well defined about what data in the structs it actually locks. Define what I believe is the correct behaviour and get rid of the lockdep ordering warning. Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c24
3 files changed, 22 insertions, 8 deletions
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 1ebfd5a6dfec..4f0d181a690c 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -538,8 +538,8 @@ int r600_vb_ib_get(struct radeon_device *rdev)
538 538
539void r600_vb_ib_put(struct radeon_device *rdev) 539void r600_vb_ib_put(struct radeon_device *rdev)
540{ 540{
541 mutex_lock(&rdev->ib_pool.mutex);
542 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); 541 radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
542 mutex_lock(&rdev->ib_pool.mutex);
543 list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs); 543 list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
544 mutex_unlock(&rdev->ib_pool.mutex); 544 mutex_unlock(&rdev->ib_pool.mutex);
545 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); 545 radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 99292be8bc99..ff9e4171559a 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -402,6 +402,10 @@ struct radeon_ib {
402 uint32_t length_dw; 402 uint32_t length_dw;
403}; 403};
404 404
405/*
406 * locking -
407 * mutex protects scheduled_ibs, ready, alloc_bm
408 */
405struct radeon_ib_pool { 409struct radeon_ib_pool {
406 struct mutex mutex; 410 struct mutex mutex;
407 struct radeon_object *robj; 411 struct radeon_object *robj;
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 168a555d6fba..747b4bffb84b 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -56,10 +56,12 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
56 set_bit(i, rdev->ib_pool.alloc_bm); 56 set_bit(i, rdev->ib_pool.alloc_bm);
57 rdev->ib_pool.ibs[i].length_dw = 0; 57 rdev->ib_pool.ibs[i].length_dw = 0;
58 *ib = &rdev->ib_pool.ibs[i]; 58 *ib = &rdev->ib_pool.ibs[i];
59 mutex_unlock(&rdev->ib_pool.mutex);
59 goto out; 60 goto out;
60 } 61 }
61 if (list_empty(&rdev->ib_pool.scheduled_ibs)) { 62 if (list_empty(&rdev->ib_pool.scheduled_ibs)) {
62 /* we go do nothings here */ 63 /* we go do nothings here */
64 mutex_unlock(&rdev->ib_pool.mutex);
63 DRM_ERROR("all IB allocated none scheduled.\n"); 65 DRM_ERROR("all IB allocated none scheduled.\n");
64 r = -EINVAL; 66 r = -EINVAL;
65 goto out; 67 goto out;
@@ -69,10 +71,13 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
69 struct radeon_ib, list); 71 struct radeon_ib, list);
70 if (nib->fence == NULL) { 72 if (nib->fence == NULL) {
71 /* we go do nothings here */ 73 /* we go do nothings here */
74 mutex_unlock(&rdev->ib_pool.mutex);
72 DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); 75 DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx);
73 r = -EINVAL; 76 r = -EINVAL;
74 goto out; 77 goto out;
75 } 78 }
79 mutex_unlock(&rdev->ib_pool.mutex);
80
76 r = radeon_fence_wait(nib->fence, false); 81 r = radeon_fence_wait(nib->fence, false);
77 if (r) { 82 if (r) {
78 DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, 83 DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx,
@@ -81,12 +86,17 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
81 goto out; 86 goto out;
82 } 87 }
83 radeon_fence_unref(&nib->fence); 88 radeon_fence_unref(&nib->fence);
89
84 nib->length_dw = 0; 90 nib->length_dw = 0;
91
92 /* scheduled list is accessed here */
93 mutex_lock(&rdev->ib_pool.mutex);
85 list_del(&nib->list); 94 list_del(&nib->list);
86 INIT_LIST_HEAD(&nib->list); 95 INIT_LIST_HEAD(&nib->list);
96 mutex_unlock(&rdev->ib_pool.mutex);
97
87 *ib = nib; 98 *ib = nib;
88out: 99out:
89 mutex_unlock(&rdev->ib_pool.mutex);
90 if (r) { 100 if (r) {
91 radeon_fence_unref(&fence); 101 radeon_fence_unref(&fence);
92 } else { 102 } else {
@@ -110,9 +120,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
110 return; 120 return;
111 } 121 }
112 list_del(&tmp->list); 122 list_del(&tmp->list);
113 if (tmp->fence) { 123 INIT_LIST_HEAD(&tmp->list);
124 if (tmp->fence)
114 radeon_fence_unref(&tmp->fence); 125 radeon_fence_unref(&tmp->fence);
115 } 126
116 tmp->length_dw = 0; 127 tmp->length_dw = 0;
117 clear_bit(tmp->idx, rdev->ib_pool.alloc_bm); 128 clear_bit(tmp->idx, rdev->ib_pool.alloc_bm);
118 mutex_unlock(&rdev->ib_pool.mutex); 129 mutex_unlock(&rdev->ib_pool.mutex);
@@ -122,25 +133,24 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
122{ 133{
123 int r = 0; 134 int r = 0;
124 135
125 mutex_lock(&rdev->ib_pool.mutex);
126 if (!ib->length_dw || !rdev->cp.ready) { 136 if (!ib->length_dw || !rdev->cp.ready) {
127 /* TODO: Nothings in the ib we should report. */ 137 /* TODO: Nothings in the ib we should report. */
128 mutex_unlock(&rdev->ib_pool.mutex);
129 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); 138 DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx);
130 return -EINVAL; 139 return -EINVAL;
131 } 140 }
141
132 /* 64 dwords should be enough for fence too */ 142 /* 64 dwords should be enough for fence too */
133 r = radeon_ring_lock(rdev, 64); 143 r = radeon_ring_lock(rdev, 64);
134 if (r) { 144 if (r) {
135 DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); 145 DRM_ERROR("radeon: scheduling IB failled (%d).\n", r);
136 mutex_unlock(&rdev->ib_pool.mutex);
137 return r; 146 return r;
138 } 147 }
139 radeon_ring_ib_execute(rdev, ib); 148 radeon_ring_ib_execute(rdev, ib);
140 radeon_fence_emit(rdev, ib->fence); 149 radeon_fence_emit(rdev, ib->fence);
141 radeon_ring_unlock_commit(rdev); 150 mutex_lock(&rdev->ib_pool.mutex);
142 list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); 151 list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs);
143 mutex_unlock(&rdev->ib_pool.mutex); 152 mutex_unlock(&rdev->ib_pool.mutex);
153 radeon_ring_unlock_commit(rdev);
144 return 0; 154 return 0;
145} 155}
146 156