author     Dave Airlie <airlied@redhat.com>  2012-10-15 20:13:24 -0400
committer  Dave Airlie <airlied@redhat.com>  2012-10-15 20:14:28 -0400
commit     39df01cd6ce9f6dd755ace0030e2bebe75da7727 (patch)
tree       6945f240913d41c1b1a6d0b78cfc1903cf4ba40e /drivers/gpu
parent     3459f6204730fc3c0e44059296f6daa592e8c8d6 (diff)
parent     8ad33cdf97639e2a1601b6dd8629d351c1c50198 (diff)
Merge branch 'drm-fixes-3.7' of git://people.freedesktop.org/~agd5f/linux into drm-fixes
Alex writes:
"This is the first -fixes pull for 3.7. I would have preferred
to have gotten it out a bit sooner, but I was on holiday last week.
- Cleanup of the new 2 level page table code it get it in
better shape and using less memory.
- Fix some display issues related to the PLL rework.
- Fix some cmpiler warnings and errors with certain config
options.
- Other misc bug fixes."
* 'drm-fixes-3.7' of git://people.freedesktop.org/~agd5f/linux:
drm/radeon: fix spelling typos in debugging output
drm/radeon: Don't destroy I2C Bus Rec in radeon_ext_tmds_enc_destroy().
drm/radeon: check if pcie gen 2 is already enabled (v2)
drm/radeon/cayman: set VM max pfn at MC init
drm/radeon: separate pt alloc from lru add
drm/radeon: don't add the IB pool to all VMs v2
drm/radeon: allocate page tables on demand v4
drm/radeon: update comments to clarify VM setup (v2)
drm/radeon: allocate PPLLs from low to high
drm/radeon: fix compilation with backlight disabled
drm/radeon: use %zu for formatting size_t
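
The headline change in this pull is the two-level page table rework: instead of one flat page table per VM, each VM now has a page directory whose entries point at page tables allocated on demand. A rough standalone sketch of how an address splits across the two levels; BLOCK_SIZE = 9 is an assumption for illustration only, the driver's real constant is RADEON_VM_BLOCK_SIZE and its value is not shown in this diff:

    /* Two-level VM address split, illustrative only.
     * BLOCK_SIZE = 9 is assumed; radeon uses RADEON_VM_BLOCK_SIZE. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SHIFT 12                 /* 4 KiB GPU pages */
    #define BLOCK_SIZE     9                  /* assumed PTEs-per-table shift */

    int main(void)
    {
        uint64_t va = 0x12345000;             /* arbitrary GPU virtual address */
        uint64_t pfn = va >> GPU_PAGE_SHIFT;  /* page frame number */
        uint64_t pde = pfn >> BLOCK_SIZE;     /* page directory entry index */
        uint64_t pte = pfn & ((1u << BLOCK_SIZE) - 1); /* entry within table */

        printf("pfn=%" PRIu64 " pde=%" PRIu64 " pte=%" PRIu64 "\n", pfn, pde, pte);
        return 0;
    }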
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c          |   8
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c              |   7
-rw-r--r--  drivers/gpu/drm/radeon/ni.c                     |  12
-rw-r--r--  drivers/gpu/drm/radeon/r600.c                   |   6
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h                 |  14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c            |   6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c              |   1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c          |   4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c            | 374
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c             |  22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c |  48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c            |   2
-rw-r--r--  drivers/gpu/drm/radeon/si.c                     |   7
13 files changed, 359 insertions(+), 152 deletions(-)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 96184d02c8d9..2e566e123e9e 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1690,10 +1690,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 		}
 		/* all other cases */
 		pll_in_use = radeon_get_pll_use_mask(crtc);
-		if (!(pll_in_use & (1 << ATOM_PPLL2)))
-			return ATOM_PPLL2;
 		if (!(pll_in_use & (1 << ATOM_PPLL1)))
 			return ATOM_PPLL1;
+		if (!(pll_in_use & (1 << ATOM_PPLL2)))
+			return ATOM_PPLL2;
 		DRM_ERROR("unable to allocate a PPLL\n");
 		return ATOM_PPLL_INVALID;
 	} else {
@@ -1715,10 +1715,10 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 		}
 		/* all other cases */
 		pll_in_use = radeon_get_pll_use_mask(crtc);
-		if (!(pll_in_use & (1 << ATOM_PPLL2)))
-			return ATOM_PPLL2;
 		if (!(pll_in_use & (1 << ATOM_PPLL1)))
 			return ATOM_PPLL1;
+		if (!(pll_in_use & (1 << ATOM_PPLL2)))
+			return ATOM_PPLL2;
 		DRM_ERROR("unable to allocate a PPLL\n");
 		return ATOM_PPLL_INVALID;
 	} else {
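
The atombios_crtc.c hunks above are the "allocate PPLLs from low to high" fix: the only change is trying PPLL1 before PPLL2 against the in-use mask. The underlying pattern, reduced to a standalone first-free-bit scan (generic code, not the driver's helpers):

    /* Lowest-free-resource allocation from a use mask; the PPLL fix
     * above restores this low-to-high order.  Illustrative only. */
    #include <stdio.h>

    static int pick_lowest_free(unsigned int in_use, int count)
    {
        int i;

        for (i = 0; i < count; i++)
            if (!(in_use & (1u << i)))
                return i;
        return -1; /* all in use */
    }

    int main(void)
    {
        printf("%d\n", pick_lowest_free(0x1, 2)); /* PLL0 busy -> picks 1 */
        return 0;
    }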
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a1f49c5fd74b..14313ad43b76 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3431,9 +3431,14 @@ void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 	if (!(mask & DRM_PCIE_SPEED_50))
 		return;
 
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if (speed_cntl & LC_CURRENT_DATA_RATE) {
+		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		return;
+	}
+
 	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
-	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
 	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
 	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
 
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8bcb554ea0c5..8c74c729586d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -770,9 +770,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(0x15DC, 0);
 
 	/* empty context1-7 */
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and setup and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
 	for (i = 1; i < 8; i++) {
 		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
-		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
+		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
 		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
 		       rdev->gart.table_addr >> 12);
 	}
@@ -1572,12 +1576,6 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 	if (vm == NULL)
 		return;
 
-	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0));
-	radeon_ring_write(ring, 0);
-
-	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0));
-	radeon_ring_write(ring, vm->last_pfn);
-
 	radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
 	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
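
Two things happen in ni.c: the unused VM contexts now get a valid end address (max_pfn) and page-table base up front, and cayman_vm_flush() stops reprogramming the per-VM start/end registers on every flush, since the range is fixed at MC init. Note the per-context register addressing: each context's copy of a register sits four bytes after the previous one, hence base + (i << 2). A sketch of just that stride arithmetic (the 0x1000 base is invented for illustration):

    /* Per-context register stride: context i's copy of a register is
     * at base + i * 4 bytes, i.e. base + (i << 2).  The base value
     * here is made up; only the stride is the point. */
    #include <stdio.h>

    #define PAGE_TABLE_BASE_ADDR 0x1000 /* assumed register offset */

    int main(void)
    {
        unsigned int i;

        for (i = 1; i < 8; i++)
            printf("context %u -> reg 0x%x\n", i, PAGE_TABLE_BASE_ADDR + (i << 2));
        return 0;
    }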
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 70c800ff6190..cda280d157da 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3703,6 +3703,12 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
 	if (!(mask & DRM_PCIE_SPEED_50))
 		return;
 
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if (speed_cntl & LC_CURRENT_DATA_RATE) {
+		DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+		return;
+	}
+
 	DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
 	/* 55 nm r6xx asics */
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b04c06444d8b..8c42d54c2e26 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -663,9 +663,14 @@ struct radeon_vm {
 	struct list_head	list;
 	struct list_head	va;
 	unsigned		id;
-	unsigned		last_pfn;
-	u64			pd_gpu_addr;
-	struct radeon_sa_bo	*sa_bo;
+
+	/* contains the page directory */
+	struct radeon_sa_bo	*page_directory;
+	uint64_t		pd_gpu_addr;
+
+	/* array of page tables, one for each page directory entry */
+	struct radeon_sa_bo	**page_tables;
+
 	struct mutex		mutex;
 	/* last fence for cs using this vm */
 	struct radeon_fence	*fence;
@@ -1843,9 +1848,10 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
  */
 int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
 int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 				       struct radeon_vm *vm, int ring);
 void radeon_vm_fence(struct radeon_device *rdev,
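
The struct change is the heart of the on-demand scheme: last_pfn goes away, the page directory becomes its own sub-allocation, and a CPU-side array holds one sub-allocation pointer per directory entry, left NULL until a mapping first touches that range. A reduced model of the new bookkeeping (simplified types standing in for struct radeon_sa_bo):

    /* Reduced model of the new radeon_vm bookkeeping above.  struct buf
     * stands in for struct radeon_sa_bo; allocation is lazy per PDE. */
    #include <stdlib.h>

    struct buf { void *cpu; unsigned long long gpu; };

    struct vm {
        struct buf *page_directory;  /* one allocation for all PDEs */
        struct buf **page_tables;    /* [num_pdes], NULL until needed */
        unsigned int num_pdes;
    };

    int vm_touch_pde(struct vm *vm, unsigned int idx)
    {
        if (idx >= vm->num_pdes)
            return -1;
        if (vm->page_tables[idx])
            return 0;                /* table already exists */
        vm->page_tables[idx] = calloc(1, sizeof(*vm->page_tables[idx]));
        return vm->page_tables[idx] ? 0 : -1;
    }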
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index b0a5688c67f8..196d28d99570 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -201,7 +201,7 @@ static int radeon_atif_verify_interface(acpi_handle handle,
 
 	size = *(u16 *) info->buffer.pointer;
 	if (size < 12) {
-		DRM_INFO("ATIF buffer is too small: %lu\n", size);
+		DRM_INFO("ATIF buffer is too small: %zu\n", size);
 		err = -EINVAL;
 		goto out;
 	}
@@ -370,6 +370,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
 
 		radeon_set_backlight_level(rdev, enc, req.backlight_level);
 
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 		if (rdev->is_atom_bios) {
 			struct radeon_encoder_atom_dig *dig = enc->enc_priv;
 			backlight_force_update(dig->bl_dev,
@@ -379,6 +380,7 @@ int radeon_atif_handler(struct radeon_device *rdev,
 			backlight_force_update(dig->bl_dev,
 					       BACKLIGHT_UPDATE_HOTKEY);
 		}
+#endif
 	}
 	}
 	/* TODO: check other events */
@@ -485,7 +487,7 @@ static int radeon_atcs_verify_interface(acpi_handle handle,
 
 	size = *(u16 *) info->buffer.pointer;
 	if (size < 8) {
-		DRM_INFO("ATCS buffer is too small: %lu\n", size);
+		DRM_INFO("ATCS buffer is too small: %zu\n", size);
 		err = -EINVAL;
 		goto out;
 	}
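
The two format-string hunks are the "use %zu for formatting size_t" commit: size here is evidently a size_t (that is the point of the fix), and %lu is only coincidentally correct on targets where unsigned long has the same width, which is what produced the warnings. A minimal illustration:

    /* %zu is the C99 conversion for size_t; %lu is only coincidentally
     * correct on LP64 targets. */
    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t size = 11;

        printf("ATIF buffer is too small: %zu\n", size);
        return 0;
    }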
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index cb7b7c062fef..41672cc563fb 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -478,6 +478,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	}
 
 out:
+	radeon_vm_add_to_lru(rdev, vm);
 	mutex_unlock(&vm->mutex);
 	mutex_unlock(&rdev->vm_manager.lock);
 	return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 64a42647f08a..bd13ca09eb62 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1018,6 +1018,10 @@ int radeon_device_init(struct radeon_device *rdev,
 		return r;
 	/* initialize vm here */
 	mutex_init(&rdev->vm_manager.lock);
+	/* Adjust VM size here.
+	 * Currently set to 4GB ((1 << 20) 4k pages).
+	 * Max GPUVM size for cayman and SI is 40 bits.
+	 */
 	rdev->vm_manager.max_pfn = 1 << 20;
 	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 
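
The new comment's 4GB figure checks out: 1 << 20 page-frame numbers at 4k per GPU page is exactly 4 GiB of per-VM address space, against a hardware maximum of 40 address bits on cayman/SI. Worked out:

    /* Arithmetic behind the comment above: (1 << 20) 4k pages = 4 GiB. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t max_pfn = 1 << 20;
        uint64_t bytes = max_pfn * 4096;

        printf("%llu GiB\n", (unsigned long long)(bytes >> 30)); /* 4 */
        return 0;
    }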
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index f0c06d196b75..a7677dd1ce98 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -423,6 +423,18 @@ void radeon_gart_fini(struct radeon_device *rdev)
  */
 
 /**
+ * radeon_vm_num_pde - return the number of page directory entries
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Calculate the number of page directory entries (cayman+).
+ */
+static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
+{
+	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
+}
+
+/**
  * radeon_vm_directory_size - returns the size of the page directory in bytes
  *
  * @rdev: radeon_device pointer
@@ -431,7 +443,7 @@ void radeon_gart_fini(struct radeon_device *rdev)
  */
 static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
 {
-	return (rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE) * 8;
+	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
 
 /**
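
With the new helper, the directory size is simply the PDE count times eight bytes per entry, rounded up to a GPU page. Assuming for illustration that RADEON_VM_BLOCK_SIZE were 9 (the constant's value is not visible in this diff), max_pfn = 1 << 20 would yield 2048 PDEs and a 16 KiB directory:

    /* Worked example of the radeon_vm_num_pdes()/directory_size()
     * arithmetic above.  BLOCK_SIZE = 9 is an assumption. */
    #include <stdio.h>

    #define BLOCK_SIZE 9
    #define ALIGN_4K(x) (((x) + 4095u) & ~4095u)

    int main(void)
    {
        unsigned int max_pfn = 1 << 20;
        unsigned int num_pdes = max_pfn >> BLOCK_SIZE;  /* 2048 */
        unsigned int dir_size = ALIGN_4K(num_pdes * 8); /* 16384 */

        printf("%u pdes, %u bytes\n", num_pdes, dir_size);
        return 0;
    }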
@@ -451,11 +463,11 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 
 	if (!rdev->vm_manager.enabled) {
 		/* allocate enough for 2 full VM pts */
-		size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
-		size += RADEON_GPU_PAGE_ALIGN(rdev->vm_manager.max_pfn * 8);
+		size = radeon_vm_directory_size(rdev);
+		size += rdev->vm_manager.max_pfn * 8;
 		size *= 2;
 		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-					      size,
+					      RADEON_GPU_PAGE_ALIGN(size),
 					      RADEON_GEM_DOMAIN_VRAM);
 		if (r) {
 			dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -476,7 +488,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 
 	/* restore page table */
 	list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
-		if (vm->sa_bo == NULL)
+		if (vm->page_directory == NULL)
 			continue;
 
 		list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -500,16 +512,25 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
 			      struct radeon_vm *vm)
 {
 	struct radeon_bo_va *bo_va;
+	int i;
 
-	if (!vm->sa_bo)
+	if (!vm->page_directory)
 		return;
 
 	list_del_init(&vm->list);
-	radeon_sa_bo_free(rdev, &vm->sa_bo, vm->fence);
+	radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
 
 	list_for_each_entry(bo_va, &vm->va, vm_list) {
 		bo_va->valid = false;
 	}
+
+	if (vm->page_tables == NULL)
+		return;
+
+	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
+		radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
+
+	kfree(vm->page_tables);
 }
 
 /**
@@ -546,63 +567,106 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
 }
 
 /**
+ * radeon_vm_evict - evict page table to make room for new one
+ *
+ * @rdev: radeon_device pointer
+ * @vm: VM we want to allocate something for
+ *
+ * Evict a VM from the lru, making sure that it isn't @vm. (cayman+).
+ * Returns 0 for success, -ENOMEM for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	struct radeon_vm *vm_evict;
+
+	if (list_empty(&rdev->vm_manager.lru_vm))
+		return -ENOMEM;
+
+	vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
+				    struct radeon_vm, list);
+	if (vm_evict == vm)
+		return -ENOMEM;
+
+	mutex_lock(&vm_evict->mutex);
+	radeon_vm_free_pt(rdev, vm_evict);
+	mutex_unlock(&vm_evict->mutex);
+	return 0;
+}
+
+/**
  * radeon_vm_alloc_pt - allocates a page table for a VM
  *
  * @rdev: radeon_device pointer
  * @vm: vm to bind
  *
  * Allocate a page table for the requested vm (cayman+).
- * Also starts to populate the page table.
  * Returns 0 for success, error for failure.
  *
  * Global and local mutex must be locked!
  */
 int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-	struct radeon_vm *vm_evict;
-	int r;
+	unsigned pd_size, pts_size;
 	u64 *pd_addr;
-	int tables_size;
+	int r;
 
 	if (vm == NULL) {
 		return -EINVAL;
 	}
 
-	/* allocate enough to cover the current VM size */
-	tables_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
-	tables_size += RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8);
-
-	if (vm->sa_bo != NULL) {
-		/* update lru */
-		list_del_init(&vm->list);
-		list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+	if (vm->page_directory != NULL) {
 		return 0;
 	}
 
 retry:
-	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
-			     tables_size, RADEON_GPU_PAGE_SIZE, false);
+	pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
+	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+			     &vm->page_directory, pd_size,
+			     RADEON_GPU_PAGE_SIZE, false);
 	if (r == -ENOMEM) {
-		if (list_empty(&rdev->vm_manager.lru_vm)) {
+		r = radeon_vm_evict(rdev, vm);
+		if (r)
 			return r;
-		}
-		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
-		mutex_lock(&vm_evict->mutex);
-		radeon_vm_free_pt(rdev, vm_evict);
-		mutex_unlock(&vm_evict->mutex);
 		goto retry;
 
 	} else if (r) {
 		return r;
 	}
 
-	pd_addr = radeon_sa_bo_cpu_addr(vm->sa_bo);
-	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
-	memset(pd_addr, 0, tables_size);
+	vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
+
+	/* Initially clear the page directory */
+	pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
+	memset(pd_addr, 0, pd_size);
+
+	pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
+	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+
+	if (vm->page_tables == NULL) {
+		DRM_ERROR("Cannot allocate memory for page table array\n");
+		radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
 
+/**
+ * radeon_vm_add_to_lru - add VMs page table to LRU list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm to add to LRU
+ *
+ * Add the allocated page table to the LRU list (cayman+).
+ *
+ * Global mutex must be locked!
+ */
+void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+	list_del_init(&vm->list);
 	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
-	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
-				       &rdev->ring_tmp_bo.bo->tbo.mem);
 }
 
 /**
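
radeon_vm_evict() factors out what alloc_pt used to do inline, and radeon_vm_update_pdes() below reuses it: on -ENOMEM, free the least-recently-used VM's tables and retry, giving up when the only eviction candidate is the requesting VM itself. The control flow in isolation (alloc() and evict_lru() are hypothetical stand-ins declared only for the sketch):

    /* Allocate-evict-retry control flow shared by radeon_vm_alloc_pt()
     * and radeon_vm_update_pdes().  alloc()/evict_lru() are
     * hypothetical stand-ins. */
    #include <errno.h>

    int alloc(void);     /* hypothetical: returns 0 or -ENOMEM */
    int evict_lru(void); /* hypothetical: returns 0 or -ENOMEM */

    int alloc_with_eviction(void)
    {
        int r;

    retry:
        r = alloc();
        if (r == -ENOMEM) {
            r = evict_lru();   /* make room at someone else's expense */
            if (r)
                return r;      /* nothing evictable: propagate */
            goto retry;
        }
        return r;
    }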
@@ -793,20 +857,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 	}
 
 	mutex_lock(&vm->mutex);
-	if (last_pfn > vm->last_pfn) {
-		/* release mutex and lock in right order */
-		mutex_unlock(&vm->mutex);
-		mutex_lock(&rdev->vm_manager.lock);
-		mutex_lock(&vm->mutex);
-		/* and check again */
-		if (last_pfn > vm->last_pfn) {
-			/* grow va space 32M by 32M */
-			unsigned align = ((32 << 20) >> 12) - 1;
-			radeon_vm_free_pt(rdev, vm);
-			vm->last_pfn = (last_pfn + align) & ~align;
-		}
-		mutex_unlock(&rdev->vm_manager.lock);
-	}
 	head = &vm->va;
 	last_offset = 0;
 	list_for_each_entry(tmp, &vm->va, vm_list) {
@@ -865,6 +915,155 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
 }
 
 /**
+ * radeon_vm_update_pdes - make sure that page directory is valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ *
+ * Allocates new page tables if necessary
+ * and updates the page directory (cayman+).
+ * Returns 0 for success, error for failure.
+ *
+ * Global and local mutex must be locked!
+ */
+static int radeon_vm_update_pdes(struct radeon_device *rdev,
+				 struct radeon_vm *vm,
+				 uint64_t start, uint64_t end)
+{
+	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
+
+	uint64_t last_pde = ~0, last_pt = ~0;
+	unsigned count = 0;
+	uint64_t pt_idx;
+	int r;
+
+	start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+	end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
+
+	/* walk over the address space and update the page directory */
+	for (pt_idx = start; pt_idx <= end; ++pt_idx) {
+		uint64_t pde, pt;
+
+		if (vm->page_tables[pt_idx])
+			continue;
+
+retry:
+		r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
+				     &vm->page_tables[pt_idx],
+				     RADEON_VM_PTE_COUNT * 8,
+				     RADEON_GPU_PAGE_SIZE, false);
+
+		if (r == -ENOMEM) {
+			r = radeon_vm_evict(rdev, vm);
+			if (r)
+				return r;
+			goto retry;
+		} else if (r) {
+			return r;
+		}
+
+		pde = vm->pd_gpu_addr + pt_idx * 8;
+
+		pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+
+		if (((last_pde + 8 * count) != pde) ||
+		    ((last_pt + incr * count) != pt)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, last_pde,
+							last_pt, count, incr,
+							RADEON_VM_PAGE_VALID);
+			}
+
+			count = 1;
+			last_pde = pde;
+			last_pt = pt;
+		} else {
+			++count;
+		}
+	}
+
+	if (count) {
+		radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
+					incr, RADEON_VM_PAGE_VALID);
+
+	}
+
+	return 0;
+}
+
+/**
+ * radeon_vm_update_ptes - make sure that page tables are valid
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+static void radeon_vm_update_ptes(struct radeon_device *rdev,
+				  struct radeon_vm *vm,
+				  uint64_t start, uint64_t end,
+				  uint64_t dst, uint32_t flags)
+{
+	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
+
+	uint64_t last_pte = ~0, last_dst = ~0;
+	unsigned count = 0;
+	uint64_t addr;
+
+	start = start / RADEON_GPU_PAGE_SIZE;
+	end = end / RADEON_GPU_PAGE_SIZE;
+
+	/* walk over the address space and update the page tables */
+	for (addr = start; addr < end; ) {
+		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
+		unsigned nptes;
+		uint64_t pte;
+
+		if ((addr & ~mask) == (end & ~mask))
+			nptes = end - addr;
+		else
+			nptes = RADEON_VM_PTE_COUNT - (addr & mask);
+
+		pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
+		pte += (addr & mask) * 8;
+
+		if (((last_pte + 8 * count) != pte) ||
+		    ((count + nptes) > 1 << 11)) {
+
+			if (count) {
+				radeon_asic_vm_set_page(rdev, last_pte,
+							last_dst, count,
+							RADEON_GPU_PAGE_SIZE,
+							flags);
+			}
+
+			count = nptes;
+			last_pte = pte;
+			last_dst = dst;
+		} else {
+			count += nptes;
+		}
+
+		addr += nptes;
+		dst += nptes * RADEON_GPU_PAGE_SIZE;
+	}
+
+	if (count) {
+		radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
+					RADEON_GPU_PAGE_SIZE, flags);
+	}
+}
+
+/**
  * radeon_vm_bo_update_pte - map a bo into the vm page table
  *
  * @rdev: radeon_device pointer
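
Both new helpers walk a byte range, convert it to entry indices, and batch physically contiguous updates into as few radeon_asic_vm_set_page() calls as possible. The index arithmetic, extracted and checked standalone (again assuming BLOCK_SIZE = 9 and 4k pages purely for illustration):

    /* Range-to-index math used by radeon_vm_update_pdes()/_ptes().
     * BLOCK_SIZE = 9 and 4k pages are assumptions for the example. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE  4096
    #define BLOCK_SIZE 9
    #define PTE_COUNT  (1 << BLOCK_SIZE)

    int main(void)
    {
        uint64_t start = 0x200000, end = 0x400000; /* 2 MiB .. 4 MiB */
        uint64_t first_pde = (start / PAGE_SIZE) >> BLOCK_SIZE;
        uint64_t last_pde = (end / PAGE_SIZE) >> BLOCK_SIZE;
        uint64_t addr = start / PAGE_SIZE;
        unsigned int nptes = PTE_COUNT - (addr & (PTE_COUNT - 1));

        printf("pde %" PRIu64 "..%" PRIu64 ", first burst %u ptes\n",
               first_pde, last_pde, nptes);
        return 0;
    }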
@@ -887,12 +1086,11 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	struct radeon_semaphore *sem = NULL;
 	struct radeon_bo_va *bo_va;
 	unsigned nptes, npdes, ndw;
-	uint64_t pe, addr;
-	uint64_t pfn;
+	uint64_t addr;
 	int r;
 
 	/* nothing to do if vm isn't bound */
-	if (vm->sa_bo == NULL)
+	if (vm->page_directory == NULL)
 		return 0;
 
 	bo_va = radeon_vm_bo_find(vm, bo);
@@ -939,25 +1137,29 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		}
 	}
 
-	/* estimate number of dw needed */
-	/* reserve space for 32-bit padding */
-	ndw = 32;
-
 	nptes = radeon_bo_ngpu_pages(bo);
 
-	pfn = (bo_va->soffset / RADEON_GPU_PAGE_SIZE);
+	/* assume two extra pdes in case the mapping overlaps the borders */
+	npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
 
-	/* handle cases where a bo spans several pdes */
-	npdes = (ALIGN(pfn + nptes, RADEON_VM_PTE_COUNT) -
-		 (pfn & ~(RADEON_VM_PTE_COUNT - 1))) >> RADEON_VM_BLOCK_SIZE;
+	/* estimate number of dw needed */
+	/* semaphore, fence and padding */
+	ndw = 32;
+
+	if (RADEON_VM_BLOCK_SIZE > 11)
+		/* reserve space for one header for every 2k dwords */
+		ndw += (nptes >> 11) * 3;
+	else
+		/* reserve space for one header for
+		    every (1 << BLOCK_SIZE) entries */
+		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3;
 
-	/* reserve space for one header for every 2k dwords */
-	ndw += (nptes >> 11) * 3;
 	/* reserve space for pte addresses */
 	ndw += nptes * 2;
 
 	/* reserve space for one header for every 2k dwords */
 	ndw += (npdes >> 11) * 3;
+
 	/* reserve space for pde addresses */
 	ndw += npdes * 2;
 
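
The rewritten estimate budgets 32 dwords of fixed overhead (semaphore, fence, padding), three header dwords per set_page burst, and two dwords per PTE/PDE address, with two spare PDEs for mappings that straddle table boundaries. A worked instance for a hypothetical 1 MiB buffer (BLOCK_SIZE again assumed to be 9, which takes the else branch above):

    /* Worked instance of the ndw estimate for a 1 MiB BO; BLOCK_SIZE=9
     * assumed, so the else branch of the diff applies. */
    #include <stdio.h>

    #define BLOCK_SIZE 9

    int main(void)
    {
        unsigned int nptes = (1 << 20) / 4096;          /* 256 pages */
        unsigned int npdes = (nptes >> BLOCK_SIZE) + 2; /* 0 + 2 spare */
        unsigned int ndw = 32;                          /* fixed overhead */

        ndw += (nptes >> BLOCK_SIZE) * 3; /* pte burst headers */
        ndw += nptes * 2;                 /* pte addresses */
        ndw += (npdes >> 11) * 3;         /* pde burst headers */
        ndw += npdes * 2;                 /* pde addresses */
        printf("ndw = %u\n", ndw);        /* 548 */
        return 0;
    }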
@@ -971,22 +1173,14 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 		radeon_fence_note_sync(vm->fence, ridx);
 	}
 
-	/* update page table entries */
-	pe = vm->pd_gpu_addr;
-	pe += radeon_vm_directory_size(rdev);
-	pe += (bo_va->soffset / RADEON_GPU_PAGE_SIZE) * 8;
-
-	radeon_asic_vm_set_page(rdev, pe, addr, nptes,
-				RADEON_GPU_PAGE_SIZE, bo_va->flags);
-
-	/* update page directory entries */
-	addr = pe;
-
-	pe = vm->pd_gpu_addr;
-	pe += ((bo_va->soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE) * 8;
+	r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
 
-	radeon_asic_vm_set_page(rdev, pe, addr, npdes,
-				RADEON_VM_PTE_COUNT * 8, RADEON_VM_PAGE_VALID);
+	radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
+			      addr, bo_va->flags);
 
 	radeon_fence_unref(&vm->fence);
 	r = radeon_fence_emit(rdev, &vm->fence, ridx);
@@ -997,6 +1191,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 	radeon_ring_unlock_commit(rdev, ring);
 	radeon_semaphore_free(rdev, &sem, vm->fence);
 	radeon_fence_unref(&vm->last_flush);
+
 	return 0;
 }
 
@@ -1056,31 +1251,15 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
  * @rdev: radeon_device pointer
  * @vm: requested vm
  *
- * Init @vm (cayman+).
- * Map the IB pool and any other shared objects into the VM
- * by default as it's used by all VMs.
- * Returns 0 for success, error for failure.
+ * Init @vm fields (cayman+).
  */
-int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 {
-	struct radeon_bo_va *bo_va;
-	int r;
-
 	vm->id = 0;
 	vm->fence = NULL;
-	vm->last_pfn = 0;
 	mutex_init(&vm->mutex);
 	INIT_LIST_HEAD(&vm->list);
 	INIT_LIST_HEAD(&vm->va);
-
-	/* map the ib pool buffer at 0 in virtual address space, set
-	 * read only
-	 */
-	bo_va = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo);
-	r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
-				  RADEON_VM_PAGE_READABLE |
-				  RADEON_VM_PAGE_SNOOPED);
-	return r;
 }
 
 /**
@@ -1102,17 +1281,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	radeon_vm_free_pt(rdev, vm);
 	mutex_unlock(&rdev->vm_manager.lock);
 
-	/* remove all bo at this point non are busy any more because unbind
-	 * waited for the last vm fence to signal
-	 */
-	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
-	if (!r) {
-		bo_va = radeon_vm_bo_find(vm, rdev->ring_tmp_bo.bo);
-		list_del_init(&bo_va->bo_list);
-		list_del_init(&bo_va->vm_list);
-		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
-		kfree(bo_va);
-	}
 	if (!list_empty(&vm->va)) {
 		dev_err(rdev->dev, "still active bo inside vm\n");
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 83b8d8aa71c0..dc781c49b96b 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -419,6 +419,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN) {
 		struct radeon_fpriv *fpriv;
+		struct radeon_bo_va *bo_va;
 		int r;
 
 		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -426,7 +427,15 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 			return -ENOMEM;
 		}
 
-		r = radeon_vm_init(rdev, &fpriv->vm);
+		radeon_vm_init(rdev, &fpriv->vm);
+
+		/* map the ib pool buffer read only into
+		 * virtual address space */
+		bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
+					 rdev->ring_tmp_bo.bo);
+		r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+					  RADEON_VM_PAGE_READABLE |
+					  RADEON_VM_PAGE_SNOOPED);
 		if (r) {
 			radeon_vm_fini(rdev, &fpriv->vm);
 			kfree(fpriv);
@@ -454,6 +463,17 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
 	/* new gpu have virtual address space support */
 	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
 		struct radeon_fpriv *fpriv = file_priv->driver_priv;
+		struct radeon_bo_va *bo_va;
+		int r;
+
+		r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+		if (!r) {
+			bo_va = radeon_vm_bo_find(&fpriv->vm,
+						  rdev->ring_tmp_bo.bo);
+			if (bo_va)
+				radeon_vm_bo_rmv(rdev, bo_va);
+			radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
+		}
 
 		radeon_vm_fini(rdev, &fpriv->vm);
 		kfree(fpriv);
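
Because radeon_vm_init() no longer maps the IB pool itself, the open/close paths now own that mapping symmetrically: open creates the VM and maps the shared IB buffer read-only at RADEON_VA_IB_OFFSET, close finds and removes the mapping before tearing the VM down. The shape of that pairing, reduced to a toy model (all names are stand-ins for the radeon_vm_* calls in the diff, error paths trimmed):

    /* Open/close symmetry introduced above, toy model only. */
    #include <stdio.h>

    struct vm { int ib_mapped; };

    void open_kms(struct vm *vm)
    {
        vm->ib_mapped = 1;       /* vm_bo_add + vm_bo_set_addr */
    }

    void close_kms(struct vm *vm)
    {
        if (vm->ib_mapped)       /* vm_bo_find */
            vm->ib_mapped = 0;   /* vm_bo_rmv, before vm teardown */
        printf("vm torn down\n");
    }

    int main(void)
    {
        struct vm vm = { 0 };

        open_kms(&vm);
        close_kms(&vm);
        return 0;
    }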
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 92487e614778..a13ad9d707cf 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -269,27 +269,6 @@ static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
 	.disable = radeon_legacy_encoder_disable,
 };
 
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
-static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
-{
-	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
-	uint8_t level;
-
-	/* Convert brightness to hardware level */
-	if (bd->props.brightness < 0)
-		level = 0;
-	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
-		level = RADEON_MAX_BL_LEVEL;
-	else
-		level = bd->props.brightness;
-
-	if (pdata->negative)
-		level = RADEON_MAX_BL_LEVEL - level;
-
-	return level;
-}
-
 u8
 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
 {
@@ -331,6 +310,27 @@ radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 leve
 	radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
 }
 
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+
+static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
+{
+	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
+	uint8_t level;
+
+	/* Convert brightness to hardware level */
+	if (bd->props.brightness < 0)
+		level = 0;
+	else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
+		level = RADEON_MAX_BL_LEVEL;
+	else
+		level = bd->props.brightness;
+
+	if (pdata->negative)
+		level = RADEON_MAX_BL_LEVEL - level;
+
+	return level;
+}
+
 static int radeon_legacy_backlight_update_status(struct backlight_device *bd)
 {
 	struct radeon_backlight_privdata *pdata = bl_get_data(bd);
@@ -991,11 +991,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
 static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
-	if (tmds) {
-		if (tmds->i2c_bus)
-			radeon_i2c_destroy(tmds->i2c_bus);
-	}
+	/* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
 	kfree(radeon_encoder->enc_priv);
 	drm_encoder_cleanup(encoder);
 	kfree(radeon_encoder);
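
The legacy encoder changes are two independent fixes: radeon_legacy_lvds_level() moves inside the CONFIG_BACKLIGHT_CLASS_DEVICE guard so builds with backlight disabled no longer see a defined-but-unused static function, and the external-TMDS destructor stops freeing the I2C bus record that radeon_i2c_fini() owns. The guard pattern in miniature (CONFIG_FEATURE stands in for the Kconfig symbols):

    /* Static helpers used only by conditional code must live under the
     * same guard, or disabled builds warn/fail.  CONFIG_FEATURE is a
     * stand-in for the backlight Kconfig symbols. */
    #include <stdio.h>

    #define CONFIG_FEATURE 1 /* set to 0 to mimic the disabled build */

    #if CONFIG_FEATURE
    static int helper(int x)      /* compiled only with its caller */
    {
        return x * 2;
    }

    static void feature_update(void)
    {
        printf("%d\n", helper(21));
    }
    #endif

    int main(void)
    {
    #if CONFIG_FEATURE
        feature_update();
    #endif
        return 0;
    }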
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index bba66902c83b..47634f27f2e5 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -305,7 +305,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 {
 #if DRM_DEBUG_CODE
 	if (ring->count_dw <= 0) {
-		DRM_ERROR("radeon: writting more dword to ring than expected !\n");
+		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
 	}
 #endif
 	ring->ring[ring->wptr++] = v;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f79633a036c3..df8dd7701643 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2407,12 +2407,13 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(0x15DC, 0);
 
 	/* empty context1-15 */
-	/* FIXME start with 4G, once using 2 level pt switch to full
-	 * vm size space
-	 */
 	/* set vm size, must be a multiple of 4 */
 	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
 	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
+	/* Assign the pt base to something valid for now; the pts used for
+	 * the VMs are determined by the application and setup and assigned
+	 * on the fly in the vm part of radeon_gart.c
+	 */
 	for (i = 1; i < 16; i++) {
 		if (i < 8)
 			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),