about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/radeon/radeon_cs.c
diff options
context:
space:
mode:
authorDaniel Vetter <daniel.vetter@ffwll.ch>2012-05-08 07:39:59 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2012-05-08 07:39:59 -0400
commit5e13a0c5ec05d382b488a691dfb8af015b1dea1e (patch)
tree7a06dfa1f7661f8908193f2437b32452520221d3 /drivers/gpu/drm/radeon/radeon_cs.c
parentb615b57a124a4af7b68196bc2fb8acc236041fa2 (diff)
parent4f256e8aa3eda15c11c3cec3ec5336e1fc579cbd (diff)
Merge remote-tracking branch 'airlied/drm-core-next' into drm-intel-next-queued
Backmerge of drm-next to resolve a few ugly conflicts and to get a few fixes from 3.4-rc6 (which drm-next has already merged). Note that this merge also restricts the stencil cache lra evict policy workaround to snb (as it should) - I had to frob the code anyway because the CM0_MASK_SHIFT define died in the masked bit cleanups. We need the backmerge to get Paulo Zanoni's infoframe regression fix for gm45 - further bugfixes from him touch the same area and would needlessly conflict. Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_cs.c')
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 53
1 file changed, 30 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index e7b0b5d51bc3..c66beb1662b5 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -118,6 +118,7 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
118static int radeon_cs_sync_rings(struct radeon_cs_parser *p) 118static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
119{ 119{
120 bool sync_to_ring[RADEON_NUM_RINGS] = { }; 120 bool sync_to_ring[RADEON_NUM_RINGS] = { };
121 bool need_sync = false;
121 int i, r; 122 int i, r;
122 123
123 for (i = 0; i < p->nrelocs; i++) { 124 for (i = 0; i < p->nrelocs; i++) {
@@ -126,36 +127,24 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
126 127
127 if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) { 128 if (!(p->relocs[i].flags & RADEON_RELOC_DONT_SYNC)) {
128 struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj; 129 struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
129 if (!radeon_fence_signaled(fence)) { 130 if (fence->ring != p->ring && !radeon_fence_signaled(fence)) {
130 sync_to_ring[fence->ring] = true; 131 sync_to_ring[fence->ring] = true;
132 need_sync = true;
131 } 133 }
132 } 134 }
133 } 135 }
134 136
135 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 137 if (!need_sync) {
136 /* no need to sync to our own or unused rings */ 138 return 0;
137 if (i == p->ring || !sync_to_ring[i] || !p->rdev->ring[i].ready) 139 }
138 continue;
139
140 if (!p->ib->fence->semaphore) {
141 r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
142 if (r)
143 return r;
144 }
145
146 r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
147 if (r)
148 return r;
149 radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
150 radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
151 140
152 r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3); 141 r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
153 if (r) 142 if (r) {
154 return r; 143 return r;
155 radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
156 radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
157 } 144 }
158 return 0; 145
146 return radeon_semaphore_sync_rings(p->rdev, p->ib->fence->semaphore,
147 sync_to_ring, p->ring);
159} 148}
160 149
161int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 150int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -172,6 +161,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
172 /* get chunks */ 161 /* get chunks */
173 INIT_LIST_HEAD(&p->validated); 162 INIT_LIST_HEAD(&p->validated);
174 p->idx = 0; 163 p->idx = 0;
164 p->ib = NULL;
165 p->const_ib = NULL;
175 p->chunk_ib_idx = -1; 166 p->chunk_ib_idx = -1;
176 p->chunk_relocs_idx = -1; 167 p->chunk_relocs_idx = -1;
177 p->chunk_flags_idx = -1; 168 p->chunk_flags_idx = -1;
@@ -336,6 +327,9 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
336 kfree(parser->chunks); 327 kfree(parser->chunks);
337 kfree(parser->chunks_array); 328 kfree(parser->chunks_array);
338 radeon_ib_free(parser->rdev, &parser->ib); 329 radeon_ib_free(parser->rdev, &parser->ib);
330 if (parser->const_ib) {
331 radeon_ib_free(parser->rdev, &parser->const_ib);
332 }
339} 333}
340 334
341static int radeon_cs_ib_chunk(struct radeon_device *rdev, 335static int radeon_cs_ib_chunk(struct radeon_device *rdev,
@@ -507,6 +501,16 @@ out:
507 return r; 501 return r;
508} 502}
509 503
504static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
505{
506 if (r == -EDEADLK) {
507 r = radeon_gpu_reset(rdev);
508 if (!r)
509 r = -EAGAIN;
510 }
511 return r;
512}
513
510int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) 514int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
511{ 515{
512 struct radeon_device *rdev = dev->dev_private; 516 struct radeon_device *rdev = dev->dev_private;
@@ -528,6 +532,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
528 if (r) { 532 if (r) {
529 DRM_ERROR("Failed to initialize parser !\n"); 533 DRM_ERROR("Failed to initialize parser !\n");
530 radeon_cs_parser_fini(&parser, r); 534 radeon_cs_parser_fini(&parser, r);
535 r = radeon_cs_handle_lockup(rdev, r);
531 radeon_mutex_unlock(&rdev->cs_mutex); 536 radeon_mutex_unlock(&rdev->cs_mutex);
532 return r; 537 return r;
533 } 538 }
@@ -536,6 +541,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
536 if (r != -ERESTARTSYS) 541 if (r != -ERESTARTSYS)
537 DRM_ERROR("Failed to parse relocation %d!\n", r); 542 DRM_ERROR("Failed to parse relocation %d!\n", r);
538 radeon_cs_parser_fini(&parser, r); 543 radeon_cs_parser_fini(&parser, r);
544 r = radeon_cs_handle_lockup(rdev, r);
539 radeon_mutex_unlock(&rdev->cs_mutex); 545 radeon_mutex_unlock(&rdev->cs_mutex);
540 return r; 546 return r;
541 } 547 }
@@ -549,6 +555,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
549 } 555 }
550out: 556out:
551 radeon_cs_parser_fini(&parser, r); 557 radeon_cs_parser_fini(&parser, r);
558 r = radeon_cs_handle_lockup(rdev, r);
552 radeon_mutex_unlock(&rdev->cs_mutex); 559 radeon_mutex_unlock(&rdev->cs_mutex);
553 return r; 560 return r;
554} 561}