author     Christian König <deathsimple@vodafone.de>    2012-05-09 09:34:54 -0400
committer  Dave Airlie <airlied@redhat.com>              2012-05-09 12:22:37 -0400
commit     557017a0e219b2a466a71a8d72332a715d460416
tree       42edbacba20652c9b7106f0abcce772fb70cb3d2
parent     2e0d99103e7b62ad27dcbc8c92337687dd8294e6
drm/radeon: define new SA interface v3
Define the interface without modifying the allocation algorithm in any way.

v2: rebase on top of fence new uint64 patch
v3: add ring to debugfs output

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_sa.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c  60
1 file changed, 51 insertions(+), 9 deletions(-)
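
For orientation, a minimal sketch of how a caller might use the reworked interface below; the wrapper function, the size/align values and the fence plumbing are illustrative assumptions, only the radeon_sa_bo_new()/radeon_sa_bo_free() signatures come from this patch:

/* Illustrative caller (not part of this patch): allocate from a
 * suballocator manager, optionally blocking until a fenced allocation
 * can be reclaimed, then free the buffer against a fence so the
 * allocator releases it lazily once the GPU is done with it. */
static int example_sub_alloc(struct radeon_device *rdev,
			     struct radeon_sa_manager *sa_manager,
			     struct radeon_fence *fence,
			     struct radeon_sa_bo **sa_bo)
{
	int r;

	/* block == true: if the manager is full, wait on a fence protecting
	 * a live allocation and retry instead of returning -ENOMEM */
	r = radeon_sa_bo_new(rdev, sa_manager, sa_bo, 4096, 256, true);
	if (r)
		return r;

	/* ... emit work that uses the suballocation, signalled by fence ... */

	/* freeing with a fence defers the actual release until it signals */
	radeon_sa_bo_free(rdev, sa_bo, fence);
	return 0;
}
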
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 625f2d4f638a..90ee8add2443 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -129,20 +129,32 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
  *
  * Alignment can't be bigger than page size
  */
+
+static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+{
+	list_del(&sa_bo->list);
+	radeon_fence_unref(&sa_bo->fence);
+	kfree(sa_bo);
+}
+
 int radeon_sa_bo_new(struct radeon_device *rdev,
 		     struct radeon_sa_manager *sa_manager,
 		     struct radeon_sa_bo **sa_bo,
-		     unsigned size, unsigned align)
+		     unsigned size, unsigned align, bool block)
 {
-	struct radeon_sa_bo *tmp;
+	struct radeon_fence *fence = NULL;
+	struct radeon_sa_bo *tmp, *next;
 	struct list_head *head;
 	unsigned offset = 0, wasted = 0;
+	int r;
 
 	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
 	BUG_ON(size > sa_manager->size);
 
 	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
 
+retry:
+
 	spin_lock(&sa_manager->lock);
 
 	/* no one ? */
@@ -153,7 +165,17 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 
 	/* look for a hole big enough */
 	offset = 0;
-	list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
+	list_for_each_entry_safe(tmp, next, &sa_manager->sa_bo, list) {
+		/* try to free this object */
+		if (tmp->fence) {
+			if (radeon_fence_signaled(tmp->fence)) {
+				radeon_sa_bo_remove_locked(tmp);
+				continue;
+			} else {
+				fence = tmp->fence;
+			}
+		}
+
 		/* room before this object ? */
 		if (offset < tmp->soffset && (tmp->soffset - offset) >= size) {
 			head = tmp->list.prev;
@@ -178,6 +200,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
 	if ((sa_manager->size - offset) < size) {
 		/* failed to find somethings big enough */
 		spin_unlock(&sa_manager->lock);
+		if (block && fence) {
+			r = radeon_fence_wait(fence, false);
+			if (r)
+				return r;
+
+			goto retry;
+		}
 		kfree(*sa_bo);
 		*sa_bo = NULL;
 		return -ENOMEM;
@@ -192,15 +221,22 @@ out:
 	return 0;
 }
 
-void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo)
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+		       struct radeon_fence *fence)
 {
+	struct radeon_sa_manager *sa_manager;
+
 	if (!sa_bo || !*sa_bo)
 		return;
 
-	spin_lock(&(*sa_bo)->manager->lock);
-	list_del_init(&(*sa_bo)->list);
-	spin_unlock(&(*sa_bo)->manager->lock);
-	kfree(*sa_bo);
+	sa_manager = (*sa_bo)->manager;
+	spin_lock(&sa_manager->lock);
+	if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+		(*sa_bo)->fence = radeon_fence_ref(fence);
+	} else {
+		radeon_sa_bo_remove_locked(*sa_bo);
+	}
+	spin_unlock(&sa_manager->lock);
 	*sa_bo = NULL;
 }
 
@@ -212,8 +248,14 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
 
 	spin_lock(&sa_manager->lock);
 	list_for_each_entry(i, &sa_manager->sa_bo, list) {
-		seq_printf(m, "[%08x %08x] size %4d [%p]\n",
+		seq_printf(m, "[%08x %08x] size %4d (%p)",
 			   i->soffset, i->eoffset, i->eoffset - i->soffset, i);
+		if (i->fence) {
+			seq_printf(m, " protected by %Ld (%p) on ring %d\n",
+				   i->fence->seq, i->fence, i->fence->ring);
+		} else {
+			seq_printf(m, "\n");
+		}
 	}
 	spin_unlock(&sa_manager->lock);
 }
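
With the v3 addition, each allocation line in the debugfs dump also names the fence protecting it and the ring that fence belongs to. A hypothetical output line (offsets, pointers and sequence number invented for illustration) following the new format strings would look like:

	[00001000 00002000] size 4096 (ffff8800340d2b40) protected by 102 (ffff880037a4e600) on ring 0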