path: root/drivers/gpu/drm/radeon/radeon_cs.c
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_cs.c')

 drivers/gpu/drm/radeon/radeon_cs.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 49 insertions(+), 11 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ee712c199b25..6e3d1c8f3483 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -78,7 +78,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
         struct radeon_cs_chunk *chunk;
         struct radeon_cs_buckets buckets;
         unsigned i, j;
-        bool duplicate;
+        bool duplicate, need_mmap_lock = false;
+        int r;
 
         if (p->chunk_relocs_idx == -1) {
                 return 0;
@@ -132,13 +133,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
          * the buffers used for read only, which doubles the range
          * to 0 to 31. 32 is reserved for the kernel driver.
          */
-                priority = (r->flags & 0xf) * 2 + !!r->write_domain;
+                priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
+                           + !!r->write_domain;
 
                 /* the first reloc of an UVD job is the msg and that must be in
-                   VRAM, also but everything into VRAM on AGP cards to avoid
-                   image corruptions */
+                   VRAM, also but everything into VRAM on AGP cards and older
+                   IGP chips to avoid image corruptions */
                 if (p->ring == R600_RING_TYPE_UVD_INDEX &&
-                    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev))) {
+                    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
+                     p->rdev->family == CHIP_RS780 ||
+                     p->rdev->family == CHIP_RS880)) {
+
                         /* TODO: is this still needed for NI+ ? */
                         p->relocs[i].prefered_domains =
                                 RADEON_GEM_DOMAIN_VRAM;
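A quick worked example of the mapping described in the comment above, assuming RADEON_RELOC_PRIO_MASK is 0xf (the literal it replaces):

        /* e.g. a userspace priority of 15 on a written buffer:
         *   (15 & 0xf) * 2 + 1 == 31   -- top of the 0..31 range;
         * the same buffer used read-only lands on 30, and 32 stays
         * reserved for the kernel driver. */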
@@ -164,6 +169,19 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
                         p->relocs[i].allowed_domains = domain;
                 }
 
+                if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
+                        uint32_t domain = p->relocs[i].prefered_domains;
+                        if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
+                                DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
+                                          "allowed for userptr BOs\n");
+                                return -EINVAL;
+                        }
+                        need_mmap_lock = true;
+                        domain = RADEON_GEM_DOMAIN_GTT;
+                        p->relocs[i].prefered_domains = domain;
+                        p->relocs[i].allowed_domains = domain;
+                }
+
                 p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
                 p->relocs[i].handle = r->handle;
 
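The new block confines userptr BOs to the GTT domain: their backing pages are ordinary anonymous user memory pinned via get_user_pages(), so they can never migrate into VRAM. For context, a hypothetical userspace sketch of registering such a BO through the GEM userptr ioctl added in the same series; the flag names and command index are taken from radeon_drm.h and should be treated as assumptions here:

        #include <stdint.h>
        #include <string.h>
        #include <xf86drm.h>
        #include <radeon_drm.h>

        /* Hypothetical helper: turn an anonymous allocation into a GEM
         * handle. Relocations against this handle may then only ask for
         * RADEON_GEM_DOMAIN_GTT, per the check above. */
        static int register_userptr(int fd, void *addr, uint64_t size,
                                    uint32_t *handle)
        {
                struct drm_radeon_gem_userptr args;

                memset(&args, 0, sizeof(args));
                args.addr = (uintptr_t)addr;
                args.size = size;
                args.flags = RADEON_GEM_USERPTR_ANONONLY |
                             RADEON_GEM_USERPTR_VALIDATE;

                if (drmCommandWriteRead(fd, DRM_RADEON_GEM_USERPTR,
                                        &args, sizeof(args)))
                        return -1;

                *handle = args.handle;
                return 0;
        }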
@@ -176,8 +194,15 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
         if (p->cs_flags & RADEON_CS_USE_VM)
                 p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
                                               &p->validated);
+        if (need_mmap_lock)
+                down_read(&current->mm->mmap_sem);
+
+        r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+
+        if (need_mmap_lock)
+                up_read(&current->mm->mmap_sem);
 
-        return radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
+        return r;
 }
 
 static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
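Taking mmap_sem for reading around the whole validation run is what makes the userptr path safe: populating such a BO ends up in get_user_pages(), which must be called with the caller's mmap_sem held. A minimal sketch of that underlying contract with the 3.17-era API (the helper name is invented; the CS code above simply takes the lock once around radeon_bo_list_validate() instead of per call):

        #include <linux/mm.h>
        #include <linux/sched.h>

        /* Hypothetical helper mirroring what the userptr populate path
         * does: pin num_pages of user memory starting at 'start'. */
        static long pin_user_range(unsigned long start, unsigned long num_pages,
                                   int write, struct page **pages)
        {
                long npinned;

                down_read(&current->mm->mmap_sem);
                npinned = get_user_pages(current, current->mm, start, num_pages,
                                         write, 0, pages, NULL);
                up_read(&current->mm->mmap_sem);

                return npinned; /* pages pinned, or -errno */
        }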
@@ -228,11 +253,17 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
         int i;
 
         for (i = 0; i < p->nrelocs; i++) {
+                struct reservation_object *resv;
+                struct fence *fence;
+
                 if (!p->relocs[i].robj)
                         continue;
 
+                resv = p->relocs[i].robj->tbo.resv;
+                fence = reservation_object_get_excl(resv);
+
                 radeon_semaphore_sync_to(p->ib.semaphore,
-                                         p->relocs[i].robj->tbo.sync_obj);
+                                         (struct radeon_fence *)fence);
         }
 }
 
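The per-BO sync_obj is gone; the exclusive fence is now read out of the buffer's reservation object. Both the cast here and the &parser->ib.fence->base a few hunks below depend on radeon_fence embedding the core fence object as its first member, roughly (a sketch of the assumed 3.17 layout, driver-private members elided):

        #include <linux/fence.h>

        struct radeon_fence {
                struct fence    base;   /* first member, so the cast
                                         * (struct radeon_fence *)fence and
                                         * &fence->base are inverses */
                /* ... driver-private members ... */
        };

Note the cast is only valid while every exclusive fence on a radeon BO was placed there by radeon itself; cross-driver fences would need an explicit type check.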
@@ -402,7 +433,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
 
                 ttm_eu_fence_buffer_objects(&parser->ticket,
                                             &parser->validated,
-                                            parser->ib.fence);
+                                            &parser->ib.fence->base);
         } else if (backoff) {
                 ttm_eu_backoff_reservation(&parser->ticket,
                                            &parser->validated);
@@ -450,7 +481,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
                 radeon_vce_note_usage(rdev);
 
         radeon_cs_sync_rings(parser);
-        r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+        r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
         if (r) {
                 DRM_ERROR("Failed to schedule IB !\n");
         }
@@ -541,9 +572,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 
         if ((rdev->family >= CHIP_TAHITI) &&
             (parser->chunk_const_ib_idx != -1)) {
-                r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib);
+                r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
         } else {
-                r = radeon_ib_schedule(rdev, &parser->ib, NULL);
+                r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
         }
 
 out:
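All three radeon_ib_schedule() call sites in this file (one in radeon_cs_ib_chunk above, two here) gain a trailing true. The header change is not part of this diff; the assumed new prototype in this series is:

        /* The added flag toggles an HDP flush after scheduling (the
         * parameter name is an assumption, not visible in this diff);
         * CS submissions always pass true. */
        int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
                               struct radeon_ib *const_ib, bool hdp_flush);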
@@ -628,6 +659,13 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                 up_read(&rdev->exclusive_lock);
                 return -EBUSY;
         }
+        if (rdev->in_reset) {
+                up_read(&rdev->exclusive_lock);
+                r = radeon_gpu_reset(rdev);
+                if (!r)
+                        r = -EAGAIN;
+                return r;
+        }
         /* initialize parser */
         memset(&parser, 0, sizeof(struct radeon_cs_parser));
         parser.filp = filp;
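With the in_reset check, a submission racing a GPU reset first drives the reset itself and then fails with -EAGAIN, so the ioctl is expected to be simply reissued. libdrm's drmIoctl() already restarts on EAGAIN for every command; a self-contained sketch of that retry pattern:

        #include <errno.h>
        #include <sys/ioctl.h>

        /* Mirrors the restart loop libdrm's drmIoctl() applies to all
         * commands, which is what the -EAGAIN return above relies on. */
        static int submit_retry(int fd, unsigned long request, void *arg)
        {
                int ret;

                do {
                        ret = ioctl(fd, request, arg);
                } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

                return ret;
        }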