Diffstat (limited to 'drivers/gpu/drm/radeon/si.c')
-rw-r--r--	drivers/gpu/drm/radeon/si.c	113
1 file changed, 76 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d883cae56378..f79633a036c3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1806,13 +1806,14 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
 #endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
-	radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+	radeon_ring_write(ring, ib->length_dw |
+			  (ib->vm ? (ib->vm->id << 24) : 0));
 
	if (!ib->is_const_ib) {
		/* flush read cache over gart for this vmid */
		radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
		radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
-		radeon_ring_write(ring, ib->vm_id);
+		radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
		radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
				  PACKET3_TC_ACTION_ENA |
@@ -2363,7 +2364,7 @@ void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
	WREG32(VM_INVALIDATE_REQUEST, 1);
 }
 
-int si_pcie_gart_enable(struct radeon_device *rdev)
+static int si_pcie_gart_enable(struct radeon_device *rdev)
 {
	int r, i;
 
@@ -2425,7 +2426,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 0);
-	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
	si_pcie_gart_tlb_flush(rdev);
@@ -2436,7 +2437,7 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
	return 0;
 }
 
-void si_pcie_gart_disable(struct radeon_device *rdev)
+static void si_pcie_gart_disable(struct radeon_device *rdev)
 {
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
@@ -2455,7 +2456,7 @@ void si_pcie_gart_disable(struct radeon_device *rdev)
	radeon_gart_table_vram_unpin(rdev);
 }
 
-void si_pcie_gart_fini(struct radeon_device *rdev)
+static void si_pcie_gart_fini(struct radeon_device *rdev)
 {
	si_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
@@ -2788,41 +2789,84 @@ void si_vm_fini(struct radeon_device *rdev)
 {
 }
 
-int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
+/**
+ * si_vm_set_page - update the page tables using the CP
+ *
+ * @rdev: radeon_device pointer
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the CP (cayman-si).
+ */
+void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+		    uint64_t addr, unsigned count,
+		    uint32_t incr, uint32_t flags)
 {
-	if (id < 8)
-		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
-	else
-		WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((id - 8) << 2),
-		       vm->pt_gpu_addr >> 12);
-	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-	/* bits 0-15 are the VM contexts0-15 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
-	return 0;
+	struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
+	uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+	int i;
+	uint64_t value;
+
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(1)));
+	radeon_ring_write(ring, pe);
+	radeon_ring_write(ring, upper_32_bits(pe));
+	for (i = 0; i < count; ++i) {
+		if (flags & RADEON_VM_PAGE_SYSTEM) {
+			value = radeon_vm_map_gart(rdev, addr);
+			value &= 0xFFFFFFFFFFFFF000ULL;
+		} else if (flags & RADEON_VM_PAGE_VALID)
+			value = addr;
+		else
+			value = 0;
+		addr += incr;
+		value |= r600_flags;
+		radeon_ring_write(ring, value);
+		radeon_ring_write(ring, upper_32_bits(value));
+	}
 }
 
-void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
 {
-	if (vm->id < 8)
-		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
-	else
-		WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2), 0);
-	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-	/* bits 0-15 are the VM contexts0-15 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
-}
+	struct radeon_ring *ring = &rdev->ring[ridx];
 
-void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
-{
-	if (vm->id == -1)
+	if (vm == NULL)
		return;
 
+	/* write new base address */
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+
+	if (vm->id < 8) {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+	} else {
+		radeon_ring_write(ring,
+				  (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+	}
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
	/* flush hdp cache */
-	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 0x1);
+
	/* bits 0-15 are the VM contexts0-15 */
-	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+				 WRITE_DATA_DST_SEL(0)));
+	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+	radeon_ring_write(ring, 0);
+	radeon_ring_write(ring, 1 << vm->id);
 }
 
 /*
@@ -3199,10 +3243,6 @@ int si_irq_set(struct radeon_device *rdev)
		DRM_DEBUG("si_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
-	if (rdev->irq.gui_idle) {
-		DRM_DEBUG("gui idle\n");
-		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
-	}
 
	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
@@ -3658,7 +3698,6 @@ restart_ih:
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
-			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
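Note on the register-write pattern above: the new si_vm_flush() no longer pokes VM registers with WREG32 from the CPU, it emits the writes on the ring so they are ordered with the rest of the command stream. Each write is the same five-dword PACKET3_WRITE_DATA sequence. A minimal sketch of that sequence, assuming the usual radeon ring helpers; the si_ring_wreg() name is hypothetical and not part of this patch:

/* Hypothetical helper (not in this patch): emit one register write
 * through the CP using the five-dword PACKET3_WRITE_DATA sequence that
 * si_vm_flush() repeats for the page table base, the HDP flush and the
 * VM invalidate request.
 */
static void si_ring_wreg(struct radeon_ring *ring, u32 reg, u32 val)
{
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));	/* destination: register space */
	radeon_ring_write(ring, reg >> 2);			/* register offset in dwords */
	radeon_ring_write(ring, 0);				/* upper address bits, unused here */
	radeon_ring_write(ring, val);
}

With such a helper, the TLB invalidate at the end of si_vm_flush() would reduce to si_ring_wreg(ring, VM_INVALIDATE_REQUEST, 1 << vm->id), provided it is emitted inside the same reserved ring space as the surrounding packets.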