Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_cs.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cs.c	121
1 file changed, 49 insertions(+), 72 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6f377de099f9..c830863bc98a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	struct drm_device *ddev = p->rdev->ddev;
 	struct radeon_cs_chunk *chunk;
 	struct radeon_cs_buckets buckets;
-	unsigned i, j;
-	bool duplicate, need_mmap_lock = false;
+	unsigned i;
+	bool need_mmap_lock = false;
 	int r;
 
-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		return 0;
 	}
-	chunk = &p->chunks[p->chunk_relocs_idx];
+	chunk = p->chunk_relocs;
 	p->dma_reloc_idx = 0;
 	/* FIXME: we assume that each relocs use 4 dwords */
 	p->nrelocs = chunk->length_dw / 4;
-	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
-	if (p->relocs_ptr == NULL) {
-		return -ENOMEM;
-	}
-	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
 	if (p->relocs == NULL) {
 		return -ENOMEM;
 	}
@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
 	for (i = 0; i < p->nrelocs; i++) {
 		struct drm_radeon_cs_reloc *r;
+		struct drm_gem_object *gobj;
 		unsigned priority;
 
-		duplicate = false;
 		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
-		for (j = 0; j < i; j++) {
-			if (r->handle == p->relocs[j].handle) {
-				p->relocs_ptr[i] = &p->relocs[j];
-				duplicate = true;
-				break;
-			}
-		}
-		if (duplicate) {
-			p->relocs[i].handle = 0;
-			continue;
-		}
-
-		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
-							  r->handle);
-		if (p->relocs[i].gobj == NULL) {
+		gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+		if (gobj == NULL) {
 			DRM_ERROR("gem object lookup failed 0x%x\n",
 				  r->handle);
 			return -ENOENT;
 		}
-		p->relocs_ptr[i] = &p->relocs[i];
-		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+		p->relocs[i].robj = gem_to_radeon_bo(gobj);
 
 		/* The userspace buffer priorities are from 0 to 15. A higher
 		 * number means the buffer is more important.
@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 
 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
 		p->relocs[i].tv.shared = !r->write_domain;
-		p->relocs[i].handle = r->handle;
 
 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
 				      priority);
@@ -251,15 +232,15 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
 
 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	int r;
 
 	list_for_each_entry(reloc, &p->validated, tv.head) {
 		struct reservation_object *resv;
 
 		resv = reloc->robj->tbo.resv;
-		r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
-					       reloc->tv.shared);
+		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
+				     reloc->tv.shared);
 		if (r)
 			return r;
 	}
@@ -282,13 +263,11 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	INIT_LIST_HEAD(&p->validated);
 	p->idx = 0;
 	p->ib.sa_bo = NULL;
-	p->ib.semaphore = NULL;
 	p->const_ib.sa_bo = NULL;
-	p->const_ib.semaphore = NULL;
-	p->chunk_ib_idx = -1;
-	p->chunk_relocs_idx = -1;
-	p->chunk_flags_idx = -1;
-	p->chunk_const_ib_idx = -1;
+	p->chunk_ib = NULL;
+	p->chunk_relocs = NULL;
+	p->chunk_flags = NULL;
+	p->chunk_const_ib = NULL;
 	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
 	if (p->chunks_array == NULL) {
 		return -ENOMEM;
@@ -315,24 +294,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			return -EFAULT;
 		}
 		p->chunks[i].length_dw = user_chunk.length_dw;
-		p->chunks[i].chunk_id = user_chunk.chunk_id;
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
-			p->chunk_relocs_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
+			p->chunk_relocs = &p->chunks[i];
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
-			p->chunk_ib_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+			p->chunk_ib = &p->chunks[i];
 			/* zero length IB isn't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
-			p->chunk_const_ib_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+			p->chunk_const_ib = &p->chunks[i];
 			/* zero length CONST IB isn't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-			p->chunk_flags_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->chunk_flags = &p->chunks[i];
 			/* zero length flags aren't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
@@ -341,10 +319,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 		size = p->chunks[i].length_dw;
 		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
 			continue;
 
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
 			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
 				continue;
 		}
@@ -357,7 +335,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 			return -EFAULT;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
 			p->cs_flags = p->chunks[i].kdata[0];
 			if (p->chunks[i].length_dw > 1)
 				ring = p->chunks[i].kdata[1];
@@ -398,8 +376,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 static int cmp_size_smaller_first(void *priv, struct list_head *a,
 				  struct list_head *b)
 {
-	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
-	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
+	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
 
 	/* Sort A before B if A is smaller. */
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
@@ -440,13 +418,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
 
 	if (parser->relocs != NULL) {
 		for (i = 0; i < parser->nrelocs; i++) {
-			if (parser->relocs[i].gobj)
-				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+			struct radeon_bo *bo = parser->relocs[i].robj;
+			if (bo == NULL)
+				continue;
+
+			drm_gem_object_unreference_unlocked(&bo->gem_base);
 		}
 	}
 	kfree(parser->track);
 	kfree(parser->relocs);
-	kfree(parser->relocs_ptr);
 	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
@@ -461,7 +441,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 {
 	int r;
 
-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;
 
 	if (parser->cs_flags & RADEON_CS_USE_VM)
@@ -521,10 +501,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 	for (i = 0; i < p->nrelocs; i++) {
 		struct radeon_bo *bo;
 
-		/* ignore duplicates */
-		if (p->relocs_ptr[i] != &p->relocs[i])
-			continue;
-
 		bo = p->relocs[i].robj;
 		bo_va = radeon_vm_bo_find(vm, bo);
 		if (bo_va == NULL) {
@@ -535,6 +511,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
 		if (r)
 			return r;
+
+		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
 	}
 
 	return radeon_vm_clear_invalids(rdev, vm);
@@ -547,7 +525,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	struct radeon_vm *vm = &fpriv->vm;
 	int r;
 
-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;
 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
 		return 0;
@@ -579,10 +557,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		DRM_ERROR("Failed to sync rings: %i\n", r);
 		goto out;
 	}
-	radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
 
 	if ((rdev->family >= CHIP_TAHITI) &&
-	    (parser->chunk_const_ib_idx != -1)) {
+	    (parser->chunk_const_ib != NULL)) {
 		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
 	} else {
 		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
@@ -609,7 +586,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
 	struct radeon_vm *vm = NULL;
 	int r;
 
-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;
 
 	if (parser->cs_flags & RADEON_CS_USE_VM) {
@@ -617,8 +594,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
 		vm = &fpriv->vm;
 
 		if ((rdev->family >= CHIP_TAHITI) &&
-		    (parser->chunk_const_ib_idx != -1)) {
-			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+		    (parser->chunk_const_ib != NULL)) {
+			ib_chunk = parser->chunk_const_ib;
 			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
 				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
 				return -EINVAL;
@@ -637,13 +614,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
 			return -EFAULT;
 		}
 
-		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+		ib_chunk = parser->chunk_ib;
 		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
 			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
 			return -EINVAL;
 		}
 	}
-	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	ib_chunk = parser->chunk_ib;
 
 	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
 			  vm, ib_chunk->length_dw * 4);
@@ -735,7 +712,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
 			   struct radeon_cs_packet *pkt,
 			   unsigned idx)
 {
-	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
 	struct radeon_device *rdev = p->rdev;
 	uint32_t header;
 
@@ -829,7 +806,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
  * GPU offset using the provided start.
  **/
 int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
-				struct radeon_cs_reloc **cs_reloc,
+				struct radeon_bo_list **cs_reloc,
 				int nomm)
 {
 	struct radeon_cs_chunk *relocs_chunk;
@@ -837,12 +814,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
 	unsigned idx;
 	int r;
 
-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		DRM_ERROR("No relocation chunk !\n");
 		return -EINVAL;
 	}
 	*cs_reloc = NULL;
-	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	relocs_chunk = p->chunk_relocs;
 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
 	if (r)
 		return r;
@@ -868,6 +845,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
 			(u64)relocs_chunk->kdata[idx + 3] << 32;
 		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
 	} else
-		*cs_reloc = p->relocs_ptr[(idx / 4)];
+		*cs_reloc = &p->relocs[(idx / 4)];
 	return 0;
 }