Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
 drivers/gpu/drm/radeon/evergreen.c | 308 ++++++++++++++++++++++++++++++++----
 1 file changed, 280 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 14313ad43b76..061fa0a28900 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1330,6 +1330,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
 					break;
 				udelay(1);
 			}
+		} else {
+			save->crtc_enabled[i] = false;
 		}
 	}
 
@@ -1372,7 +1374,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
 	WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
 	for (i = 0; i < rdev->num_crtc; i++) {
-		if (save->crtc_enabled) {
+		if (save->crtc_enabled[i]) {
 			if (ASIC_IS_DCE6(rdev)) {
 				tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
 				tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
@@ -1648,7 +1650,7 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
 	ring->wptr = 0;
 	WREG32(CP_RB_WPTR, ring->wptr);
 
-	/* set the wb address wether it's enabled or not */
+	/* set the wb address whether it's enabled or not */
 	WREG32(CP_RB_RPTR_ADDR,
 	       ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
 	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
@@ -1819,7 +1821,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	case CHIP_SUMO:
 		rdev->config.evergreen.num_ses = 1;
 		rdev->config.evergreen.max_pipes = 4;
-		rdev->config.evergreen.max_tile_pipes = 2;
+		rdev->config.evergreen.max_tile_pipes = 4;
 		if (rdev->pdev->device == 0x9648)
 			rdev->config.evergreen.max_simds = 3;
 		else if ((rdev->pdev->device == 0x9647) ||
@@ -1842,7 +1844,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
-		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+		gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_SUMO2:
 		rdev->config.evergreen.num_ses = 1;
@@ -1864,7 +1866,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		rdev->config.evergreen.sc_prim_fifo_size = 0x40;
 		rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
 		rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
-		gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
+		gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
 		break;
 	case CHIP_BARTS:
 		rdev->config.evergreen.num_ses = 2;
@@ -1912,7 +1914,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 		break;
 	case CHIP_CAICOS:
 		rdev->config.evergreen.num_ses = 1;
-		rdev->config.evergreen.max_pipes = 4;
+		rdev->config.evergreen.max_pipes = 2;
 		rdev->config.evergreen.max_tile_pipes = 2;
 		rdev->config.evergreen.max_simds = 2;
 		rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
@@ -2032,6 +2034,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 	WREG32(GB_ADDR_CONFIG, gb_addr_config);
 	WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
 	WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+	WREG32(DMA_TILING_CONFIG, gb_addr_config);
 
 	tmp = gb_addr_config & NUM_PIPES_MASK;
 	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
@@ -2303,22 +2306,20 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
-static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
+static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
 {
-	struct evergreen_mc_save save;
 	u32 grbm_reset = 0;
 
 	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-		return 0;
+		return;
 
-	dev_info(rdev->dev, "GPU softreset \n");
-	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
 		RREG32(GRBM_STATUS));
-	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
 		RREG32(GRBM_STATUS_SE0));
-	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
 		RREG32(GRBM_STATUS_SE1));
-	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
 		RREG32(SRBM_STATUS));
 	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
 		RREG32(CP_STALLED_STAT1));
@@ -2328,10 +2329,7 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
 		RREG32(CP_BUSY_STAT));
 	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
 		RREG32(CP_STAT));
-	evergreen_mc_stop(rdev, &save);
-	if (evergreen_mc_wait_for_idle(rdev)) {
-		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
-	}
+
 	/* Disable CP parsing/prefetching */
 	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
 
@@ -2355,15 +2353,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
 	udelay(50);
 	WREG32(GRBM_SOFT_RESET, 0);
 	(void)RREG32(GRBM_SOFT_RESET);
-	/* Wait a little for things to settle down */
-	udelay(50);
-	dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
+
+	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
 		RREG32(GRBM_STATUS));
-	dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
+	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
 		RREG32(GRBM_STATUS_SE0));
-	dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
+	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
 		RREG32(GRBM_STATUS_SE1));
-	dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
+	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
 		RREG32(SRBM_STATUS));
 	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
 		RREG32(CP_STALLED_STAT1));
@@ -2373,13 +2370,65 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
 		RREG32(CP_BUSY_STAT));
 	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
 		RREG32(CP_STAT));
+}
+
+static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+{
+	u32 tmp;
+
+	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
+		return;
+
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+
+	/* Disable DMA */
+	tmp = RREG32(DMA_RB_CNTL);
+	tmp &= ~DMA_RB_ENABLE;
+	WREG32(DMA_RB_CNTL, tmp);
+
+	/* Reset dma */
+	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+	RREG32(SRBM_SOFT_RESET);
+	udelay(50);
+	WREG32(SRBM_SOFT_RESET, 0);
+
+	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+		RREG32(DMA_STATUS_REG));
+}
+
+static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+{
+	struct evergreen_mc_save save;
+
+	if (reset_mask == 0)
+		return 0;
+
+	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+	evergreen_mc_stop(rdev, &save);
+	if (evergreen_mc_wait_for_idle(rdev)) {
+		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+	}
+
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
+		evergreen_gpu_soft_reset_gfx(rdev);
+
+	if (reset_mask & RADEON_RESET_DMA)
+		evergreen_gpu_soft_reset_dma(rdev);
+
+	/* Wait a little for things to settle down */
+	udelay(50);
+
 	evergreen_mc_resume(rdev, &save);
 	return 0;
 }
 
 int evergreen_asic_reset(struct radeon_device *rdev)
 {
-	return evergreen_gpu_soft_reset(rdev);
+	return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
+					       RADEON_RESET_COMPUTE |
+					       RADEON_RESET_DMA));
 }
 
 /* Interrupts */
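With the reset path split by engine, callers can request a targeted reset through the new reset_mask argument instead of always resetting everything. A minimal sketch of the calling convention, assuming the RADEON_RESET_* flags from radeon.h (evergreen_gpu_soft_reset() is static, so out-of-file callers still go through evergreen_asic_reset(); the direct call below is illustrative only):

	/* Illustrative sketch: reset only the async DMA engine.
	 * GFX/compute state is left alone because the mask omits
	 * RADEON_RESET_GFX and RADEON_RESET_COMPUTE.
	 */
	evergreen_gpu_soft_reset(rdev, RADEON_RESET_DMA);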
@@ -2401,8 +2450,12 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
 			CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 		cayman_cp_int_cntl_setup(rdev, 1, 0);
 		cayman_cp_int_cntl_setup(rdev, 2, 0);
+		tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+		WREG32(CAYMAN_DMA1_CNTL, tmp);
 	} else
 		WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+	tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+	WREG32(DMA_CNTL, tmp);
 	WREG32(GRBM_INT_CNTL, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
 	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2455,6 +2508,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	u32 grbm_int_cntl = 0;
 	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 	u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
+	u32 dma_cntl, dma_cntl1 = 0;
 
 	if (!rdev->irq.installed) {
 		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2482,6 +2536,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
 	afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 	afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 
+	dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
+
 	if (rdev->family >= CHIP_CAYMAN) {
 		/* enable CP interrupts on all rings */
 		if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2504,6 +2560,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		}
 	}
 
+	if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+		DRM_DEBUG("r600_irq_set: sw int dma\n");
+		dma_cntl |= TRAP_ENABLE;
+	}
+
+	if (rdev->family >= CHIP_CAYMAN) {
+		dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
+		if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+			DRM_DEBUG("r600_irq_set: sw int dma1\n");
+			dma_cntl1 |= TRAP_ENABLE;
+		}
+	}
+
 	if (rdev->irq.crtc_vblank_int[0] ||
 	    atomic_read(&rdev->irq.pflip[0])) {
 		DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2589,6 +2658,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
 		cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
 	} else
 		WREG32(CP_INT_CNTL, cp_int_cntl);
+
+	WREG32(DMA_CNTL, dma_cntl);
+
+	if (rdev->family >= CHIP_CAYMAN)
+		WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
+
 	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
 	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
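The DMA trap bits programmed here are driven by the generic radeon_irq_kms layer: dma_cntl and dma_cntl1 only gain TRAP_ENABLE while the matching ring_int counters are non-zero. A hedged sketch of the caller side, using the existing radeon_irq_kms_sw_irq_get()/_put() helpers (shown for orientation; the fence wait path in radeon_fence.c does this, not this patch):

	/* Illustrative: arm the DMA ring's software interrupt before
	 * blocking on a fence; the matching _put() disarms it again. */
	radeon_irq_kms_sw_irq_get(rdev, R600_RING_TYPE_DMA_INDEX);
	/* ... wait for the fence sequence number to be written ... */
	radeon_irq_kms_sw_irq_put(rdev, R600_RING_TYPE_DMA_INDEX);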
@@ -3091,6 +3166,16 @@ restart_ih:
 			break;
 		}
 		break;
+	case 146:
+	case 147:
+		dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+		dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+			RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+		dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+			RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+		/* reset addr and status */
+		WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+		break;
 	case 176: /* CP_INT in ring buffer */
 	case 177: /* CP_INT in IB1 */
 	case 178: /* CP_INT in IB2 */
@@ -3114,9 +3199,19 @@ restart_ih:
 		} else
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 		break;
+	case 224: /* DMA trap event */
+		DRM_DEBUG("IH: DMA trap\n");
+		radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+		break;
 	case 233: /* GUI IDLE */
 		DRM_DEBUG("IH: GUI idle\n");
 		break;
+	case 244: /* DMA trap event */
+		if (rdev->family >= CHIP_CAYMAN) {
+			DRM_DEBUG("IH: DMA1 trap\n");
+			radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+		}
+		break;
 	default:
 		DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 		break;
@@ -3142,6 +3237,143 @@ restart_ih:
 	return IRQ_HANDLED;
 }
 
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and DMA trap packet to generate
+ * an interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+				   struct radeon_fence *fence)
+{
+	struct radeon_ring *ring = &rdev->ring[fence->ring];
+	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+	/* write the fence */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+	radeon_ring_write(ring, addr & 0xfffffffc);
+	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+	radeon_ring_write(ring, fence->seq);
+	/* generate an interrupt */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+	/* flush HDP */
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+	radeon_ring_write(ring, 1);
+}
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+				   struct radeon_ib *ib)
+{
+	struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+	if (rdev->wb.enabled) {
+		u32 next_rptr = ring->wptr + 4;
+		while ((next_rptr & 7) != 5)
+			next_rptr++;
+		next_rptr += 3;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+		radeon_ring_write(ring, next_rptr);
+	}
+
+	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+	 * Pad as necessary with NOPs.
+	 */
+	while ((ring->wptr & 7) != 5)
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+		       uint64_t src_offset, uint64_t dst_offset,
+		       unsigned num_gpu_pages,
+		       struct radeon_fence **fence)
+{
+	struct radeon_semaphore *sem = NULL;
+	int ring_index = rdev->asic->copy.dma_ring_index;
+	struct radeon_ring *ring = &rdev->ring[ring_index];
+	u32 size_in_dw, cur_size_in_dw;
+	int i, num_loops;
+	int r = 0;
+
+	r = radeon_semaphore_create(rdev, &sem);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		return r;
+	}
+
+	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+	if (r) {
+		DRM_ERROR("radeon: moving bo (%d).\n", r);
+		radeon_semaphore_free(rdev, &sem, NULL);
+		return r;
+	}
+
+	if (radeon_fence_need_sync(*fence, ring->idx)) {
+		radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+					    ring->idx);
+		radeon_fence_note_sync(*fence, ring->idx);
+	} else {
+		radeon_semaphore_free(rdev, &sem, NULL);
+	}
+
+	for (i = 0; i < num_loops; i++) {
+		cur_size_in_dw = size_in_dw;
+		if (cur_size_in_dw > 0xFFFFF)
+			cur_size_in_dw = 0xFFFFF;
+		size_in_dw -= cur_size_in_dw;
+		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+		radeon_ring_write(ring, dst_offset & 0xfffffffc);
+		radeon_ring_write(ring, src_offset & 0xfffffffc);
+		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+		src_offset += cur_size_in_dw * 4;
+		dst_offset += cur_size_in_dw * 4;
+	}
+
+	r = radeon_fence_emit(rdev, fence, ring->idx);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return r;
+	}
+
+	radeon_ring_unlock_commit(rdev, ring);
+	radeon_semaphore_free(rdev, &sem, *fence);
+
+	return r;
+}
+
 static int evergreen_startup(struct radeon_device *rdev)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
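The three functions added above all build ring contents with the DMA_PACKET() macro. For reference, the evergreen encoding is expected to follow the definition in evergreend.h, roughly as sketched below; the 20-bit count field is also why evergreen_copy_dma() caps each copy chunk at 0xFFFFF dwords:

	/* Sketch of the evergreen async-DMA packet header (per evergreend.h):
	 * bits [31:28] = cmd, [23] = t, [22] = s, [19:0] = n (dword count).
	 */
	#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
						 (((t) & 0x1) << 23) |		\
						 (((s) & 0x1) << 22) |		\
						 (((n) & 0xFFFFF) << 0))

The alignment dance in evergreen_dma_ring_ib_execute() follows from packet sizes: the INDIRECT_BUFFER packet is 3 dwords, so padding the write pointer until (wptr & 7) == 5 makes the packet end exactly on an 8-dword boundary (5 + 3 = 8), as the in-code comment requires. The ring-space reservation in evergreen_copy_dma() is sized the same way: each chunk is a 5-dword COPY packet, and the extra 11 dwords cover the worst case of the optional semaphore sync plus the fence, trap, and HDP-flush packets (this 5-and-11 breakdown is inferred from the packet writes, not stated in the patch).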
@@ -3205,6 +3437,12 @@ static int evergreen_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+	r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+	if (r) {
+		dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+		return r;
+	}
+
 	/* Enable IRQ */
 	r = r600_irq_init(rdev);
 	if (r) {
@@ -3219,12 +3457,23 @@ static int evergreen_startup(struct radeon_device *rdev)
 			     0, 0xfffff, RADEON_CP_PACKET2);
 	if (r)
 		return r;
+
+	ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+	r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+			     DMA_RB_RPTR, DMA_RB_WPTR,
+			     2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+	if (r)
+		return r;
+
 	r = evergreen_cp_load_microcode(rdev);
 	if (r)
 		return r;
 	r = evergreen_cp_resume(rdev);
 	if (r)
 		return r;
+	r = r600_dma_resume(rdev);
+	if (r)
+		return r;
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
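On the radeon_ring_init() arguments for the DMA ring: R600_WB_DMA_RPTR_OFFSET is the writeback slot for the read pointer, DMA_RB_RPTR/DMA_RB_WPTR are the MMIO pointer registers, and the trailing `2, 0x3fffc` are the pointer shift and mask; the DMA pointer registers hold byte offsets, so dword indices are shifted left by 2. A hedged sketch of how r600_dma_resume() (added to r600.c in this same series) is expected to consume them when kicking the ring:

	/* Illustrative only: program the write pointer as a byte offset
	 * (dword index << 2), matching the shift/mask passed above. */
	ring->wptr = 0;
	WREG32(DMA_RB_WPTR, ring->wptr << 2);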
@@ -3271,11 +3520,9 @@ int evergreen_resume(struct radeon_device *rdev)
 
 int evergreen_suspend(struct radeon_device *rdev)
 {
-	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
-
 	r600_audio_fini(rdev);
 	r700_cp_stop(rdev);
-	ring->ready = false;
+	r600_dma_stop(rdev);
 	evergreen_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	evergreen_pcie_gart_disable(rdev);
@@ -3352,6 +3599,9 @@ int evergreen_init(struct radeon_device *rdev)
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+	rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
+	r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3364,6 +3614,7 @@ int evergreen_init(struct radeon_device *rdev)
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
 		r700_cp_fini(rdev);
+		r600_dma_fini(rdev);
 		r600_irq_fini(rdev);
 		radeon_wb_fini(rdev);
 		radeon_ib_pool_fini(rdev);
@@ -3391,6 +3642,7 @@ void evergreen_fini(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	r600_blit_fini(rdev);
 	r700_cp_fini(rdev);
+	r600_dma_fini(rdev);
 	r600_irq_fini(rdev);
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
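This file only adds the engine-side DMA support; the new ring becomes usable for buffer moves once it is registered as the asic copy callback. In the companion radeon_asic.c change, the evergreen wiring is expected to look roughly like the sketch below (field names per struct radeon_asic; shown for orientation, not copied from this patch):

	.copy = {
		.blit = &r600_copy_blit,
		.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
		.dma = &evergreen_copy_dma,
		.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
		/* prefer the async DMA engine for ttm bo moves */
		.copy = &evergreen_copy_dma,
		.copy_ring_index = R600_RING_TYPE_DMA_INDEX,
	},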
