Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_object.c')
 -rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 158
 1 file changed, 142 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index bac0d06c52ac..e98cae3bf4a6 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -44,6 +44,9 @@ struct radeon_object {
 	uint64_t gpu_addr;
 	void *kptr;
 	bool is_iomem;
+	uint32_t tiling_flags;
+	uint32_t pitch;
+	int surface_reg;
 };
 
 int radeon_ttm_init(struct radeon_device *rdev);
@@ -70,6 +73,7 @@ static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
 
 	robj = container_of(tobj, struct radeon_object, tobj);
 	list_del_init(&robj->list);
+	radeon_object_clear_surface_reg(robj);
 	kfree(robj);
 }
 
@@ -99,16 +103,16 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
 {
 	uint32_t flags = 0;
 	if (domain & RADEON_GEM_DOMAIN_VRAM) {
-		flags |= TTM_PL_FLAG_VRAM;
+		flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
 	}
 	if (domain & RADEON_GEM_DOMAIN_GTT) {
-		flags |= TTM_PL_FLAG_TT;
+		flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	}
 	if (domain & RADEON_GEM_DOMAIN_CPU) {
-		flags |= TTM_PL_FLAG_SYSTEM;
+		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
 	}
 	if (!flags) {
-		flags |= TTM_PL_FLAG_SYSTEM;
+		flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
 	}
 	return flags;
 }
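
The caching change in the hunk above is worth spelling out: VRAM placements are now pinned to write-combined or uncached mappings, while GTT and system placements accept any caching mode. Since domains simply OR together, a buffer allowed in more than one domain collects the flags of each; a small illustration (hypothetical caller, not part of the patch):

	/* Illustration only: a BO allowed in either VRAM or GTT ends up
	 * with both placement sets ORed together. */
	uint32_t flags;

	flags = radeon_object_flags_from_domain(RADEON_GEM_DOMAIN_VRAM |
						RADEON_GEM_DOMAIN_GTT);
	/* flags == (TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED)
	 *        | (TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING) */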
@@ -141,6 +145,7 @@ int radeon_object_create(struct radeon_device *rdev,
 	}
 	robj->rdev = rdev;
 	robj->gobj = gobj;
+	robj->surface_reg = -1;
 	INIT_LIST_HEAD(&robj->list);
 
 	flags = radeon_object_flags_from_domain(domain);
@@ -304,7 +309,7 @@ int radeon_object_wait(struct radeon_object *robj)
 	}
 	spin_lock(&robj->tobj.lock);
 	if (robj->tobj.sync_obj) {
-		r = ttm_bo_wait(&robj->tobj, true, false, false);
+		r = ttm_bo_wait(&robj->tobj, true, true, false);
 	}
 	spin_unlock(&robj->tobj.lock);
 	radeon_object_unreserve(robj);
@@ -403,7 +408,6 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
 	struct radeon_object *robj;
 	struct radeon_fence *old_fence = NULL;
 	struct list_head *i;
-	uint32_t flags;
 	int r;
 
 	r = radeon_object_list_reserve(head);
@@ -414,27 +418,25 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
 	list_for_each(i, head) {
 		lobj = list_entry(i, struct radeon_object_list, list);
 		robj = lobj->robj;
-		if (lobj->wdomain) {
-			flags = radeon_object_flags_from_domain(lobj->wdomain);
-			flags |= TTM_PL_FLAG_TT;
-		} else {
-			flags = radeon_object_flags_from_domain(lobj->rdomain);
-			flags |= TTM_PL_FLAG_TT;
-			flags |= TTM_PL_FLAG_VRAM;
-		}
 		if (!robj->pin_count) {
-			robj->tobj.proposed_placement = flags | TTM_PL_MASK_CACHING;
+			if (lobj->wdomain) {
+				robj->tobj.proposed_placement =
+					radeon_object_flags_from_domain(lobj->wdomain);
+			} else {
+				robj->tobj.proposed_placement =
+					radeon_object_flags_from_domain(lobj->rdomain);
+			}
 			r = ttm_buffer_object_validate(&robj->tobj,
 						       robj->tobj.proposed_placement,
 						       true, false);
 			if (unlikely(r)) {
-				radeon_object_list_unreserve(head);
 				DRM_ERROR("radeon: failed to validate.\n");
 				return r;
 			}
 			radeon_object_gpu_addr(robj);
 		}
 		lobj->gpu_offset = robj->gpu_addr;
+		lobj->tiling_flags = robj->tiling_flags;
 		if (fence) {
 			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
 			robj->tobj.sync_obj = radeon_fence_ref(fence);
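
With this hunk the validate loop also copies the object's tiling flags into the list entry, next to the GPU offset it already published. A command-submission consumer would then read both per relocation, roughly as below; this is a sketch under assumed names (the real relocation code lives in radeon_cs.c, outside this diff):

	/* Hypothetical consumer of the validated list (names assumed). */
	list_for_each(i, head) {
		lobj = list_entry(i, struct radeon_object_list, list);
		/* patch the command stream with the BO's GPU address */
		ib_chunk[reloc_idx] = lobj->gpu_offset + reloc_offset;
		/* and honour its tiling when emitting surface state */
		if (lobj->tiling_flags & RADEON_TILING_MACRO)
			reloc_flags |= SURFACE_IS_TILED; /* hypothetical flag */
	}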
@@ -479,3 +481,127 @@ unsigned long radeon_object_size(struct radeon_object *robj)
 {
 	return robj->tobj.num_pages << PAGE_SHIFT;
 }
+
+int radeon_object_get_surface_reg(struct radeon_object *robj)
+{
+	struct radeon_device *rdev = robj->rdev;
+	struct radeon_surface_reg *reg;
+	struct radeon_object *old_object;
+	int steal;
+	int i;
+
+	if (!robj->tiling_flags)
+		return 0;
+
+	if (robj->surface_reg >= 0) {
+		reg = &rdev->surface_regs[robj->surface_reg];
+		i = robj->surface_reg;
+		goto out;
+	}
+
+	steal = -1;
+	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+
+		reg = &rdev->surface_regs[i];
+		if (!reg->robj)
+			break;
+
+		old_object = reg->robj;
+		if (old_object->pin_count == 0)
+			steal = i;
+	}
+
+	/* if we are all out */
+	if (i == RADEON_GEM_MAX_SURFACES) {
+		if (steal == -1)
+			return -ENOMEM;
+		/* find someone with a surface reg and nuke their BO */
+		reg = &rdev->surface_regs[steal];
+		old_object = reg->robj;
+		/* blow away the mapping */
+		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
+		ttm_bo_unmap_virtual(&old_object->tobj);
+		old_object->surface_reg = -1;
+		i = steal;
+	}
+
+	robj->surface_reg = i;
+	reg->robj = robj;
+
+out:
+	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
+			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
+			       robj->tobj.num_pages << PAGE_SHIFT);
+	return 0;
+}
+
+void radeon_object_clear_surface_reg(struct radeon_object *robj)
+{
+	struct radeon_device *rdev = robj->rdev;
+	struct radeon_surface_reg *reg;
+
+	if (robj->surface_reg == -1)
+		return;
+
+	reg = &rdev->surface_regs[robj->surface_reg];
+	radeon_clear_surface_reg(rdev, robj->surface_reg);
+
+	reg->robj = NULL;
+	robj->surface_reg = -1;
+}
+
+void radeon_object_set_tiling_flags(struct radeon_object *robj,
+				    uint32_t tiling_flags, uint32_t pitch)
+{
+	robj->tiling_flags = tiling_flags;
+	robj->pitch = pitch;
+}
+
+void radeon_object_get_tiling_flags(struct radeon_object *robj,
+				    uint32_t *tiling_flags,
+				    uint32_t *pitch)
+{
+	if (tiling_flags)
+		*tiling_flags = robj->tiling_flags;
+	if (pitch)
+		*pitch = robj->pitch;
+}
+
+int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
+			       bool force_drop)
+{
+	if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
+		return 0;
+
+	if (force_drop) {
+		radeon_object_clear_surface_reg(robj);
+		return 0;
+	}
+
+	if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
+		if (!has_moved)
+			return 0;
+
+		if (robj->surface_reg >= 0)
+			radeon_object_clear_surface_reg(robj);
+		return 0;
+	}
+
+	if ((robj->surface_reg >= 0) && !has_moved)
+		return 0;
+
+	return radeon_object_get_surface_reg(robj);
+}
+
+void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+			   struct ttm_mem_reg *mem)
+{
+	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
+	radeon_object_check_tiling(robj, 0, 1);
+}
+
+void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
+	radeon_object_check_tiling(robj, 0, 0);
+}
