Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 207
1 file changed, 126 insertions(+), 81 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a0ac3c134b1b..2ffcf5a03551 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -285,7 +285,8 @@ void r600_hpd_init(struct radeon_device *rdev)
 			}
 		}
 	}
-	r600_irq_set(rdev);
+	if (rdev->irq.installed)
+		r600_irq_set(rdev);
 }
 
 void r600_hpd_fini(struct radeon_device *rdev)
@@ -623,7 +624,6 @@ int r600_mc_init(struct radeon_device *rdev)
 	fixed20_12 a;
 	u32 tmp;
 	int chansize, numchan;
-	int r;
 
 	/* Get VRAM informations */
 	rdev->mc.vram_is_ddr = true;
@@ -666,9 +666,6 @@ int r600_mc_init(struct radeon_device *rdev)
 	rdev->mc.real_vram_size = rdev->mc.aper_size;
 
 	if (rdev->flags & RADEON_IS_AGP) {
-		r = radeon_agp_init(rdev);
-		if (r)
-			return r;
 		/* gtt_size is setup by radeon_agp_init */
 		rdev->mc.gtt_location = rdev->mc.agp_base;
 		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
@@ -726,6 +723,10 @@ int r600_mc_init(struct radeon_device *rdev)
 	a.full = rfixed_const(100);
 	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
 	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
 	return 0;
 }
 
@@ -1384,11 +1385,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 	(void)RREG32(PCIE_PORT_DATA);
 }
 
-void r600_hdp_flush(struct radeon_device *rdev)
-{
-	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-}
-
 /*
  * CP & Ring
  */
@@ -1658,6 +1654,12 @@ void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rdev->cp.align_mask = 16 - 1;
 }
 
+void r600_cp_fini(struct radeon_device *rdev)
+{
+	r600_cp_stop(rdev);
+	radeon_ring_fini(rdev);
+}
+
 
 /*
  * GPU scratch registers helpers function.
@@ -1785,28 +1787,31 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
 	radeon_ring_write(rdev, fence->seq);
+	radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+	radeon_ring_write(rdev, 1);
 	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
 	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
 	radeon_ring_write(rdev, RB_INT_STAT);
 }
 
-int r600_copy_dma(struct radeon_device *rdev,
-		  uint64_t src_offset,
-		  uint64_t dst_offset,
-		  unsigned num_pages,
-		  struct radeon_fence *fence)
-{
-	/* FIXME: implement */
-	return 0;
-}
-
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
 		   unsigned num_pages, struct radeon_fence *fence)
 {
-	r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	int r;
+
+	mutex_lock(&rdev->r600_blit.mutex);
+	rdev->r600_blit.vb_ib = NULL;
+	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+	if (r) {
+		if (rdev->r600_blit.vb_ib)
+			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
+		mutex_unlock(&rdev->r600_blit.mutex);
+		return r;
+	}
 	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
 	r600_blit_done_copy(rdev, fence);
+	mutex_unlock(&rdev->r600_blit.mutex);
 	return 0;
 }
 
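A note on the r600_copy_blit() rework above: r600_blit_prepare_copy() can fail either before or after it allocates the vertex-buffer IB, which is why vb_ib is cleared under the mutex first and freed only if the failed prepare left it set. Below is a minimal userspace sketch of that lock / partial-failure / unwind shape — the names are stand-ins for illustration, not the driver's API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t blit_mutex = PTHREAD_MUTEX_INITIALIZER;
static void *vb_ib;	/* stand-in for rdev->r600_blit.vb_ib */

static int prepare_copy(void) { return -ENOMEM; }	/* simulated failure */
static void free_ib(void **ib) { *ib = NULL; }

static int copy_blit(void)
{
	int r;

	pthread_mutex_lock(&blit_mutex);
	vb_ib = NULL;		/* so a pre-allocation failure leaves nothing to free */
	r = prepare_copy();
	if (r) {
		if (vb_ib)	/* only set if prepare got far enough to allocate */
			free_ib(&vb_ib);
		pthread_mutex_unlock(&blit_mutex);
		return r;	/* caller can fall back to a CPU copy */
	}
	/* ... emit blit and fence here ... */
	pthread_mutex_unlock(&blit_mutex);
	return 0;
}

int main(void)
{
	printf("copy_blit: %d\n", copy_blit());
	return 0;
}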
@@ -1862,26 +1867,25 @@ int r600_startup(struct radeon_device *rdev)
 		return r;
 	}
 	r600_gpu_init(rdev);
-
-	if (!rdev->r600_blit.shader_obj) {
-		r = r600_blit_init(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed blitter (%d).\n", r);
-			return r;
-		}
-	}
-
-	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-	if (unlikely(r != 0))
-		return r;
-	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-			&rdev->r600_blit.shader_gpu_addr);
-	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-	if (r) {
-		dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
-		return r;
-	}
-
+	r = r600_blit_init(rdev);
+	if (r) {
+		r600_blit_fini(rdev);
+		rdev->asic->copy = NULL;
+		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
+	}
+	/* pin copy shader into vram */
+	if (rdev->r600_blit.shader_obj) {
+		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+				&rdev->r600_blit.shader_gpu_addr);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+		if (r) {
+			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
+			return r;
+		}
+	}
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -1946,6 +1950,13 @@ int r600_resume(struct radeon_device *rdev)
 		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
 		return r;
 	}
+
+	r = r600_audio_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: audio resume failed\n");
+		return r;
+	}
+
 	return r;
 }
 
@@ -1953,17 +1964,21 @@ int r600_suspend(struct radeon_device *rdev)
 {
 	int r;
 
+	r600_audio_fini(rdev);
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
 	rdev->cp.ready = false;
+	r600_irq_suspend(rdev);
 	r600_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
-	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-	if (unlikely(r != 0))
-		return r;
-	radeon_bo_unpin(rdev->r600_blit.shader_obj);
-	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+	if (rdev->r600_blit.shader_obj) {
+		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+		if (!r) {
+			radeon_bo_unpin(rdev->r600_blit.shader_obj);
+			radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+		}
+	}
 	return 0;
 }
 
@@ -2024,6 +2039,11 @@ int r600_init(struct radeon_device *rdev)
 	r = radeon_fence_driver_init(rdev);
 	if (r)
 		return r;
+	if (rdev->flags & RADEON_IS_AGP) {
+		r = radeon_agp_init(rdev);
+		if (r)
+			radeon_agp_disable(rdev);
+	}
 	r = r600_mc_init(rdev);
 	if (r)
 		return r;
@@ -2049,22 +2069,25 @@ int r600_init(struct radeon_device *rdev)
 	rdev->accel_working = true;
 	r = r600_startup(rdev);
 	if (r) {
-		r600_suspend(rdev);
+		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r600_cp_fini(rdev);
 		r600_wb_fini(rdev);
-		radeon_ring_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		r600_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-			rdev->accel_working = false;
-		}
-		r = r600_ib_test(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 			rdev->accel_working = false;
+		} else {
+			r = r600_ib_test(rdev);
+			if (r) {
+				dev_err(rdev->dev, "IB test failed (%d).\n", r);
+				rdev->accel_working = false;
+			}
 		}
 	}
 
@@ -2076,21 +2099,17 @@ int r600_init(struct radeon_device *rdev)
 
 void r600_fini(struct radeon_device *rdev)
 {
-	/* Suspend operations */
-	r600_suspend(rdev);
-
 	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
+	r600_cp_fini(rdev);
+	r600_wb_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	radeon_ring_fini(rdev);
-	r600_wb_fini(rdev);
 	r600_pcie_gart_fini(rdev);
+	radeon_agp_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
-	if (rdev->flags & RADEON_IS_AGP)
-		radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
@@ -2196,14 +2215,14 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rb_bufsz = drm_order(ring_size / 4);
 	ring_size = (1 << rb_bufsz) * 4;
 	rdev->ih.ring_size = ring_size;
-	rdev->ih.align_mask = 4 - 1;
+	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
+	rdev->ih.rptr = 0;
 }
 
-static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
+static int r600_ih_ring_alloc(struct radeon_device *rdev)
 {
 	int r;
 
-	rdev->ih.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->ih.ring_obj == NULL) {
 		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
@@ -2233,9 +2252,6 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
 			return r;
 		}
 	}
-	rdev->ih.ptr_mask = (rdev->cp.ring_size / 4) - 1;
-	rdev->ih.rptr = 0;
-
 	return 0;
 }
 
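The two r600_ih_ring_init()/r600_ih_ring_alloc() hunks above move the pointer-mask setup to init time: drm_order() rounds the IH ring up to a power of two, so ptr_mask = ring_size - 1 wraps any byte offset with a single AND, and the old mask derived from rdev->cp.ring_size (the CP ring, not the IH ring) is dropped. A standalone sketch of the arithmetic, with illustrative sizes:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative values: a 64 KiB IH ring holding 16-byte vectors. */
	uint32_t ring_size = 64 * 1024;		/* power of two, in bytes */
	uint32_t ptr_mask = ring_size - 1;	/* as set in r600_ih_ring_init() */
	uint32_t rptr = ring_size - 16;		/* sitting on the last vector */

	rptr = (rptr + 16) & ptr_mask;		/* advance wraps to 0, no branch */
	assert(rptr == 0);
	return 0;
}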
@@ -2385,7 +2401,7 @@ int r600_irq_init(struct radeon_device *rdev)
 	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 
 	/* allocate ring */
-	ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
+	ret = r600_ih_ring_alloc(rdev);
 	if (ret)
 		return ret;
 
@@ -2448,10 +2464,15 @@ int r600_irq_init(struct radeon_device *rdev)
 	return ret;
 }
 
-void r600_irq_fini(struct radeon_device *rdev)
+void r600_irq_suspend(struct radeon_device *rdev)
 {
 	r600_disable_interrupts(rdev);
 	r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+	r600_irq_suspend(rdev);
 	r600_ih_ring_fini(rdev);
 }
 
@@ -2461,9 +2482,17 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 mode_int = 0;
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		return -EINVAL;
+	}
 	/* don't enable anything if the ih is disabled */
-	if (!rdev->ih.enabled)
+	if (!rdev->ih.enabled) {
+		r600_disable_interrupts(rdev);
+		/* force the active interrupt state to all disabled */
+		r600_disable_interrupt_state(rdev);
 		return 0;
+	}
 
 	if (ASIC_IS_DCE3(rdev)) {
 		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
@@ -2633,16 +2662,18 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
 	wptr = RREG32(IH_RB_WPTR);
 
 	if (wptr & RB_OVERFLOW) {
-		WARN_ON(1);
-		/* XXX deal with overflow */
-		DRM_ERROR("IH RB overflow\n");
+		/* When a ring buffer overflow happen start parsing interrupt
+		 * from the last not overwritten vector (wptr + 16). Hopefully
+		 * this should allow us to catchup.
+		 */
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
+			wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
 	}
-	wptr = wptr & WPTR_OFFSET_MASK;
-
-	return wptr;
+	return (wptr & rdev->ih.ptr_mask);
 }
 
 /* r600 IV Ring
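Worked example for the overflow path above: when RB_OVERFLOW is set, the writer has lapped the reader, so the oldest vector still intact is the one just past wptr; resuming at (wptr + 16) & ptr_mask drops the overwritten entries instead of stalling. A standalone sketch with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative model of r600_get_ih_wptr() after an overflow. */
	uint32_t ptr_mask = (64 * 1024) - 1;	/* ring_size - 1 */
	uint32_t wptr = 32;			/* writer wrapped past the reader */
	uint32_t rptr;

	rptr = (wptr + 16) & ptr_mask;		/* oldest vector not yet overwritten */
	printf("resume at %u, drain until %u\n", rptr, wptr & ptr_mask);
	return 0;
}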
@@ -2678,12 +2709,13 @@ int r600_irq_process(struct radeon_device *rdev)
 	u32 wptr = r600_get_ih_wptr(rdev);
 	u32 rptr = rdev->ih.rptr;
 	u32 src_id, src_data;
-	u32 last_entry = rdev->ih.ring_size - 16;
 	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
 	unsigned long flags;
 	bool queue_hotplug = false;
 
 	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
+	if (!rdev->ih.enabled)
+		return IRQ_NONE;
 
 	spin_lock_irqsave(&rdev->ih.lock, flags);
 
@@ -2724,7 +2756,7 @@ restart_ih:
 				}
 				break;
 			default:
-				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 				break;
 			}
 			break;
@@ -2744,7 +2776,7 @@ restart_ih:
 				}
 				break;
 			default:
-				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 				break;
 			}
 			break;
@@ -2793,7 +2825,7 @@ restart_ih:
 			}
 			break;
 		default:
-			DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
 		}
 		break;
@@ -2807,15 +2839,13 @@ restart_ih:
 			DRM_DEBUG("IH: CP EOP\n");
 			break;
 		default:
-			DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
+			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
 		}
 
 		/* wptr/rptr are in bytes! */
-		if (rptr == last_entry)
-			rptr = 0;
-		else
-			rptr += 16;
+		rptr += 16;
+		rptr &= rdev->ih.ptr_mask;
 	}
 	/* make sure wptr hasn't changed while processing */
 	wptr = r600_get_ih_wptr(rdev);
@@ -2883,3 +2913,18 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
 	return 0;
 #endif
 }
+
+/**
+ * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
+ * rdev: radeon device structure
+ * bo: buffer object struct which userspace is waiting for idle
+ *
+ * Some R6XX/R7XX doesn't seems to take into account HDP flush performed
+ * through ring buffer, this leads to corruption in rendering, see
+ * http://bugzilla.kernel.org/show_bug.cgi?id=15186 to avoid this we
+ * directly perform HDP flush by writing register through MMIO.
+ */
+void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
+{
+	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+}
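This is the same register the fence path now programs through the ring (the r600_fence_ring_emit() hunk above); the difference is timing — a ring write executes whenever the CP reaches it, while the wait-idle ioctl needs the flush to take effect immediately. A toy model of that distinction; only the register offset comes from the diff, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480	/* R_005480_... in the diff */

/* Stand-ins: a real driver queues ring writes for the GPU to execute
 * later, while an MMIO write posts to the register right away. */
static void ring_write(uint32_t reg, uint32_t val)
{
	printf("ring: 0x%04X <- 0x%X (executes when the CP reaches it)\n", reg, val);
}

static void mmio_write(uint32_t reg, uint32_t val)
{
	printf("mmio: 0x%04X <- 0x%X (immediate)\n", reg, val);
}

int main(void)
{
	ring_write(HDP_MEM_COHERENCY_FLUSH_CNTL, 1);	/* fence-emit path */
	mmio_write(HDP_MEM_COHERENCY_FLUSH_CNTL, 1);	/* wait-idle ioctl path */
	return 0;
}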