Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/radeon/Makefile        |   2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c     |  42
-rw-r--r--  drivers/gpu/drm/radeon/ni.c            |  45
-rw-r--r--  drivers/gpu/drm/radeon/r100.c          |  41
-rw-r--r--  drivers/gpu/drm/radeon/r300.c          |  21
-rw-r--r--  drivers/gpu/drm/radeon/r420.c          |  21
-rw-r--r--  drivers/gpu/drm/radeon/r520.c          |  19
-rw-r--r--  drivers/gpu/drm/radeon/r600.c          |  43
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h        |  63
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h   |   2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h |  20
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c   | 254
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c     | 189
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c         |  21
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c         |  24
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c         |  24
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c         |  19
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c         |  43
18 files changed, 601 insertions, 292 deletions
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 94dcdc746041..2139fe893ec5 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -71,7 +71,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ | |||
71 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ | 71 | r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ |
72 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ | 72 | evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ |
73 | radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \ | 73 | radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \ |
74 | radeon_semaphore.o | 74 | radeon_semaphore.o radeon_sa.o |
75 | 75 | ||
76 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o | 76 | radeon-$(CONFIG_COMPAT) += radeon_ioc32.o |
77 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o | 77 | radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 266d411c6d2b..1934728e2465 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -3182,6 +3182,17 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
3182 | if (r) | 3182 | if (r) |
3183 | return r; | 3183 | return r; |
3184 | 3184 | ||
3185 | r = radeon_ib_pool_start(rdev); | ||
3186 | if (r) | ||
3187 | return r; | ||
3188 | |||
3189 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
3190 | if (r) { | ||
3191 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
3192 | rdev->accel_working = false; | ||
3193 | return r; | ||
3194 | } | ||
3195 | |||
3185 | return 0; | 3196 | return 0; |
3186 | } | 3197 | } |
3187 | 3198 | ||
@@ -3201,18 +3212,13 @@ int evergreen_resume(struct radeon_device *rdev) | |||
3201 | /* post card */ | 3212 | /* post card */ |
3202 | atom_asic_init(rdev->mode_info.atom_context); | 3213 | atom_asic_init(rdev->mode_info.atom_context); |
3203 | 3214 | ||
3215 | rdev->accel_working = true; | ||
3204 | r = evergreen_startup(rdev); | 3216 | r = evergreen_startup(rdev); |
3205 | if (r) { | 3217 | if (r) { |
3206 | DRM_ERROR("evergreen startup failed on resume\n"); | 3218 | DRM_ERROR("evergreen startup failed on resume\n"); |
3207 | return r; | 3219 | return r; |
3208 | } | 3220 | } |
3209 | 3221 | ||
3210 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
3211 | if (r) { | ||
3212 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
3213 | return r; | ||
3214 | } | ||
3215 | |||
3216 | return r; | 3222 | return r; |
3217 | 3223 | ||
3218 | } | 3224 | } |
@@ -3222,12 +3228,13 @@ int evergreen_suspend(struct radeon_device *rdev) | |||
3222 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; | 3228 | struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; |
3223 | 3229 | ||
3224 | /* FIXME: we should wait for ring to be empty */ | 3230 | /* FIXME: we should wait for ring to be empty */ |
3231 | radeon_ib_pool_suspend(rdev); | ||
3232 | r600_blit_suspend(rdev); | ||
3225 | r700_cp_stop(rdev); | 3233 | r700_cp_stop(rdev); |
3226 | ring->ready = false; | 3234 | ring->ready = false; |
3227 | evergreen_irq_suspend(rdev); | 3235 | evergreen_irq_suspend(rdev); |
3228 | radeon_wb_disable(rdev); | 3236 | radeon_wb_disable(rdev); |
3229 | evergreen_pcie_gart_disable(rdev); | 3237 | evergreen_pcie_gart_disable(rdev); |
3230 | r600_blit_suspend(rdev); | ||
3231 | 3238 | ||
3232 | return 0; | 3239 | return 0; |
3233 | } | 3240 | } |
@@ -3312,29 +3319,24 @@ int evergreen_init(struct radeon_device *rdev) | |||
3312 | if (r) | 3319 | if (r) |
3313 | return r; | 3320 | return r; |
3314 | 3321 | ||
3322 | r = radeon_ib_pool_init(rdev); | ||
3315 | rdev->accel_working = true; | 3323 | rdev->accel_working = true; |
3324 | if (r) { | ||
3325 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
3326 | rdev->accel_working = false; | ||
3327 | } | ||
3328 | |||
3316 | r = evergreen_startup(rdev); | 3329 | r = evergreen_startup(rdev); |
3317 | if (r) { | 3330 | if (r) { |
3318 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 3331 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
3319 | r700_cp_fini(rdev); | 3332 | r700_cp_fini(rdev); |
3320 | r600_irq_fini(rdev); | 3333 | r600_irq_fini(rdev); |
3321 | radeon_wb_fini(rdev); | 3334 | radeon_wb_fini(rdev); |
3335 | r100_ib_fini(rdev); | ||
3322 | radeon_irq_kms_fini(rdev); | 3336 | radeon_irq_kms_fini(rdev); |
3323 | evergreen_pcie_gart_fini(rdev); | 3337 | evergreen_pcie_gart_fini(rdev); |
3324 | rdev->accel_working = false; | 3338 | rdev->accel_working = false; |
3325 | } | 3339 | } |
3326 | if (rdev->accel_working) { | ||
3327 | r = radeon_ib_pool_init(rdev); | ||
3328 | if (r) { | ||
3329 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); | ||
3330 | rdev->accel_working = false; | ||
3331 | } | ||
3332 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
3333 | if (r) { | ||
3334 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
3335 | rdev->accel_working = false; | ||
3336 | } | ||
3337 | } | ||
3338 | return 0; | 3340 | return 0; |
3339 | } | 3341 | } |
3340 | 3342 | ||
@@ -3344,7 +3346,7 @@ void evergreen_fini(struct radeon_device *rdev) | |||
3344 | r700_cp_fini(rdev); | 3346 | r700_cp_fini(rdev); |
3345 | r600_irq_fini(rdev); | 3347 | r600_irq_fini(rdev); |
3346 | radeon_wb_fini(rdev); | 3348 | radeon_wb_fini(rdev); |
3347 | radeon_ib_pool_fini(rdev); | 3349 | r100_ib_fini(rdev); |
3348 | radeon_irq_kms_fini(rdev); | 3350 | radeon_irq_kms_fini(rdev); |
3349 | evergreen_pcie_gart_fini(rdev); | 3351 | evergreen_pcie_gart_fini(rdev); |
3350 | r600_vram_scratch_fini(rdev); | 3352 | r600_vram_scratch_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 30562622b94a..d89b2ebd5bbb 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1453,6 +1453,17 @@ static int cayman_startup(struct radeon_device *rdev) | |||
1453 | if (r) | 1453 | if (r) |
1454 | return r; | 1454 | return r; |
1455 | 1455 | ||
1456 | r = radeon_ib_pool_start(rdev); | ||
1457 | if (r) | ||
1458 | return r; | ||
1459 | |||
1460 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
1461 | if (r) { | ||
1462 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
1463 | rdev->accel_working = false; | ||
1464 | return r; | ||
1465 | } | ||
1466 | |||
1456 | return 0; | 1467 | return 0; |
1457 | } | 1468 | } |
1458 | 1469 | ||
@@ -1467,32 +1478,25 @@ int cayman_resume(struct radeon_device *rdev) | |||
1467 | /* post card */ | 1478 | /* post card */ |
1468 | atom_asic_init(rdev->mode_info.atom_context); | 1479 | atom_asic_init(rdev->mode_info.atom_context); |
1469 | 1480 | ||
1481 | rdev->accel_working = true; | ||
1470 | r = cayman_startup(rdev); | 1482 | r = cayman_startup(rdev); |
1471 | if (r) { | 1483 | if (r) { |
1472 | DRM_ERROR("cayman startup failed on resume\n"); | 1484 | DRM_ERROR("cayman startup failed on resume\n"); |
1473 | return r; | 1485 | return r; |
1474 | } | 1486 | } |
1475 | |||
1476 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
1477 | if (r) { | ||
1478 | DRM_ERROR("radeon: failled testing IB (%d).\n", r); | ||
1479 | return r; | ||
1480 | } | ||
1481 | |||
1482 | return r; | 1487 | return r; |
1483 | |||
1484 | } | 1488 | } |
1485 | 1489 | ||
1486 | int cayman_suspend(struct radeon_device *rdev) | 1490 | int cayman_suspend(struct radeon_device *rdev) |
1487 | { | 1491 | { |
1488 | /* FIXME: we should wait for ring to be empty */ | 1492 | /* FIXME: we should wait for ring to be empty */ |
1493 | radeon_ib_pool_suspend(rdev); | ||
1494 | r600_blit_suspend(rdev); | ||
1489 | cayman_cp_enable(rdev, false); | 1495 | cayman_cp_enable(rdev, false); |
1490 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 1496 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
1491 | evergreen_irq_suspend(rdev); | 1497 | evergreen_irq_suspend(rdev); |
1492 | radeon_wb_disable(rdev); | 1498 | radeon_wb_disable(rdev); |
1493 | cayman_pcie_gart_disable(rdev); | 1499 | cayman_pcie_gart_disable(rdev); |
1494 | r600_blit_suspend(rdev); | ||
1495 | |||
1496 | return 0; | 1500 | return 0; |
1497 | } | 1501 | } |
1498 | 1502 | ||
@@ -1567,29 +1571,24 @@ int cayman_init(struct radeon_device *rdev) | |||
1567 | if (r) | 1571 | if (r) |
1568 | return r; | 1572 | return r; |
1569 | 1573 | ||
1574 | r = radeon_ib_pool_init(rdev); | ||
1570 | rdev->accel_working = true; | 1575 | rdev->accel_working = true; |
1576 | if (r) { | ||
1577 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
1578 | rdev->accel_working = false; | ||
1579 | } | ||
1580 | |||
1571 | r = cayman_startup(rdev); | 1581 | r = cayman_startup(rdev); |
1572 | if (r) { | 1582 | if (r) { |
1573 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 1583 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
1574 | cayman_cp_fini(rdev); | 1584 | cayman_cp_fini(rdev); |
1575 | r600_irq_fini(rdev); | 1585 | r600_irq_fini(rdev); |
1576 | radeon_wb_fini(rdev); | 1586 | radeon_wb_fini(rdev); |
1587 | r100_ib_fini(rdev); | ||
1577 | radeon_irq_kms_fini(rdev); | 1588 | radeon_irq_kms_fini(rdev); |
1578 | cayman_pcie_gart_fini(rdev); | 1589 | cayman_pcie_gart_fini(rdev); |
1579 | rdev->accel_working = false; | 1590 | rdev->accel_working = false; |
1580 | } | 1591 | } |
1581 | if (rdev->accel_working) { | ||
1582 | r = radeon_ib_pool_init(rdev); | ||
1583 | if (r) { | ||
1584 | DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); | ||
1585 | rdev->accel_working = false; | ||
1586 | } | ||
1587 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
1588 | if (r) { | ||
1589 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
1590 | rdev->accel_working = false; | ||
1591 | } | ||
1592 | } | ||
1593 | 1592 | ||
1594 | /* Don't start up if the MC ucode is missing. | 1593 | /* Don't start up if the MC ucode is missing. |
1595 | * The default clocks and voltages before the MC ucode | 1594 | * The default clocks and voltages before the MC ucode |
@@ -1609,7 +1608,7 @@ void cayman_fini(struct radeon_device *rdev) | |||
1609 | cayman_cp_fini(rdev); | 1608 | cayman_cp_fini(rdev); |
1610 | r600_irq_fini(rdev); | 1609 | r600_irq_fini(rdev); |
1611 | radeon_wb_fini(rdev); | 1610 | radeon_wb_fini(rdev); |
1612 | radeon_ib_pool_fini(rdev); | 1611 | r100_ib_fini(rdev); |
1613 | radeon_irq_kms_fini(rdev); | 1612 | radeon_irq_kms_fini(rdev); |
1614 | cayman_pcie_gart_fini(rdev); | 1613 | cayman_pcie_gart_fini(rdev); |
1615 | r600_vram_scratch_fini(rdev); | 1614 | r600_vram_scratch_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index d7fd5aa47053..657040b15b06 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3752,28 +3752,10 @@ int r100_ib_test(struct radeon_device *rdev) | |||
3752 | 3752 | ||
3753 | void r100_ib_fini(struct radeon_device *rdev) | 3753 | void r100_ib_fini(struct radeon_device *rdev) |
3754 | { | 3754 | { |
3755 | radeon_ib_pool_suspend(rdev); | ||
3755 | radeon_ib_pool_fini(rdev); | 3756 | radeon_ib_pool_fini(rdev); |
3756 | } | 3757 | } |
3757 | 3758 | ||
3758 | int r100_ib_init(struct radeon_device *rdev) | ||
3759 | { | ||
3760 | int r; | ||
3761 | |||
3762 | r = radeon_ib_pool_init(rdev); | ||
3763 | if (r) { | ||
3764 | dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r); | ||
3765 | r100_ib_fini(rdev); | ||
3766 | return r; | ||
3767 | } | ||
3768 | r = r100_ib_test(rdev); | ||
3769 | if (r) { | ||
3770 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); | ||
3771 | r100_ib_fini(rdev); | ||
3772 | return r; | ||
3773 | } | ||
3774 | return 0; | ||
3775 | } | ||
3776 | |||
3777 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) | 3759 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) |
3778 | { | 3760 | { |
3779 | /* Shutdown CP we shouldn't need to do that but better be safe than | 3761 | /* Shutdown CP we shouldn't need to do that but better be safe than |
@@ -3932,11 +3914,18 @@ static int r100_startup(struct radeon_device *rdev) | |||
3932 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | 3914 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
3933 | return r; | 3915 | return r; |
3934 | } | 3916 | } |
3935 | r = r100_ib_init(rdev); | 3917 | |
3918 | r = radeon_ib_pool_start(rdev); | ||
3919 | if (r) | ||
3920 | return r; | ||
3921 | |||
3922 | r = r100_ib_test(rdev); | ||
3936 | if (r) { | 3923 | if (r) { |
3937 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | 3924 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
3925 | rdev->accel_working = false; | ||
3938 | return r; | 3926 | return r; |
3939 | } | 3927 | } |
3928 | |||
3940 | return 0; | 3929 | return 0; |
3941 | } | 3930 | } |
3942 | 3931 | ||
@@ -3959,11 +3948,14 @@ int r100_resume(struct radeon_device *rdev) | |||
3959 | r100_clock_startup(rdev); | 3948 | r100_clock_startup(rdev); |
3960 | /* Initialize surface registers */ | 3949 | /* Initialize surface registers */ |
3961 | radeon_surface_init(rdev); | 3950 | radeon_surface_init(rdev); |
3951 | |||
3952 | rdev->accel_working = true; | ||
3962 | return r100_startup(rdev); | 3953 | return r100_startup(rdev); |
3963 | } | 3954 | } |
3964 | 3955 | ||
3965 | int r100_suspend(struct radeon_device *rdev) | 3956 | int r100_suspend(struct radeon_device *rdev) |
3966 | { | 3957 | { |
3958 | radeon_ib_pool_suspend(rdev); | ||
3967 | r100_cp_disable(rdev); | 3959 | r100_cp_disable(rdev); |
3968 | radeon_wb_disable(rdev); | 3960 | radeon_wb_disable(rdev); |
3969 | r100_irq_disable(rdev); | 3961 | r100_irq_disable(rdev); |
@@ -4082,7 +4074,14 @@ int r100_init(struct radeon_device *rdev) | |||
4082 | return r; | 4074 | return r; |
4083 | } | 4075 | } |
4084 | r100_set_safe_registers(rdev); | 4076 | r100_set_safe_registers(rdev); |
4077 | |||
4078 | r = radeon_ib_pool_init(rdev); | ||
4085 | rdev->accel_working = true; | 4079 | rdev->accel_working = true; |
4080 | if (r) { | ||
4081 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
4082 | rdev->accel_working = false; | ||
4083 | } | ||
4084 | |||
4086 | r = r100_startup(rdev); | 4085 | r = r100_startup(rdev); |
4087 | if (r) { | 4086 | if (r) { |
4088 | /* Somethings want wront with the accel init stop accel */ | 4087 | /* Somethings want wront with the accel init stop accel */ |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index e2dfae4b40e6..8ad5c6475d55 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1414,11 +1414,18 @@ static int r300_startup(struct radeon_device *rdev) | |||
1414 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | 1414 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
1415 | return r; | 1415 | return r; |
1416 | } | 1416 | } |
1417 | r = r100_ib_init(rdev); | 1417 | |
1418 | r = radeon_ib_pool_start(rdev); | ||
1419 | if (r) | ||
1420 | return r; | ||
1421 | |||
1422 | r = r100_ib_test(rdev); | ||
1418 | if (r) { | 1423 | if (r) { |
1419 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | 1424 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
1425 | rdev->accel_working = false; | ||
1420 | return r; | 1426 | return r; |
1421 | } | 1427 | } |
1428 | |||
1422 | return 0; | 1429 | return 0; |
1423 | } | 1430 | } |
1424 | 1431 | ||
@@ -1443,11 +1450,14 @@ int r300_resume(struct radeon_device *rdev) | |||
1443 | r300_clock_startup(rdev); | 1450 | r300_clock_startup(rdev); |
1444 | /* Initialize surface registers */ | 1451 | /* Initialize surface registers */ |
1445 | radeon_surface_init(rdev); | 1452 | radeon_surface_init(rdev); |
1453 | |||
1454 | rdev->accel_working = true; | ||
1446 | return r300_startup(rdev); | 1455 | return r300_startup(rdev); |
1447 | } | 1456 | } |
1448 | 1457 | ||
1449 | int r300_suspend(struct radeon_device *rdev) | 1458 | int r300_suspend(struct radeon_device *rdev) |
1450 | { | 1459 | { |
1460 | radeon_ib_pool_suspend(rdev); | ||
1451 | r100_cp_disable(rdev); | 1461 | r100_cp_disable(rdev); |
1452 | radeon_wb_disable(rdev); | 1462 | radeon_wb_disable(rdev); |
1453 | r100_irq_disable(rdev); | 1463 | r100_irq_disable(rdev); |
@@ -1548,7 +1558,14 @@ int r300_init(struct radeon_device *rdev) | |||
1548 | return r; | 1558 | return r; |
1549 | } | 1559 | } |
1550 | r300_set_reg_safe(rdev); | 1560 | r300_set_reg_safe(rdev); |
1561 | |||
1562 | r = radeon_ib_pool_init(rdev); | ||
1551 | rdev->accel_working = true; | 1563 | rdev->accel_working = true; |
1564 | if (r) { | ||
1565 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
1566 | rdev->accel_working = false; | ||
1567 | } | ||
1568 | |||
1552 | r = r300_startup(rdev); | 1569 | r = r300_startup(rdev); |
1553 | if (r) { | 1570 | if (r) { |
1554 | /* Somethings want wront with the accel init stop accel */ | 1571 | /* Somethings want wront with the accel init stop accel */ |
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 17ecff1000e7..666e28fe509c 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -274,11 +274,18 @@ static int r420_startup(struct radeon_device *rdev) | |||
274 | return r; | 274 | return r; |
275 | } | 275 | } |
276 | r420_cp_errata_init(rdev); | 276 | r420_cp_errata_init(rdev); |
277 | r = r100_ib_init(rdev); | 277 | |
278 | r = radeon_ib_pool_start(rdev); | ||
279 | if (r) | ||
280 | return r; | ||
281 | |||
282 | r = r100_ib_test(rdev); | ||
278 | if (r) { | 283 | if (r) { |
279 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | 284 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
285 | rdev->accel_working = false; | ||
280 | return r; | 286 | return r; |
281 | } | 287 | } |
288 | |||
282 | return 0; | 289 | return 0; |
283 | } | 290 | } |
284 | 291 | ||
@@ -307,11 +314,14 @@ int r420_resume(struct radeon_device *rdev) | |||
307 | r420_clock_resume(rdev); | 314 | r420_clock_resume(rdev); |
308 | /* Initialize surface registers */ | 315 | /* Initialize surface registers */ |
309 | radeon_surface_init(rdev); | 316 | radeon_surface_init(rdev); |
317 | |||
318 | rdev->accel_working = true; | ||
310 | return r420_startup(rdev); | 319 | return r420_startup(rdev); |
311 | } | 320 | } |
312 | 321 | ||
313 | int r420_suspend(struct radeon_device *rdev) | 322 | int r420_suspend(struct radeon_device *rdev) |
314 | { | 323 | { |
324 | radeon_ib_pool_suspend(rdev); | ||
315 | r420_cp_errata_fini(rdev); | 325 | r420_cp_errata_fini(rdev); |
316 | r100_cp_disable(rdev); | 326 | r100_cp_disable(rdev); |
317 | radeon_wb_disable(rdev); | 327 | radeon_wb_disable(rdev); |
@@ -424,7 +434,14 @@ int r420_init(struct radeon_device *rdev) | |||
424 | return r; | 434 | return r; |
425 | } | 435 | } |
426 | r420_set_reg_safe(rdev); | 436 | r420_set_reg_safe(rdev); |
437 | |||
438 | r = radeon_ib_pool_init(rdev); | ||
427 | rdev->accel_working = true; | 439 | rdev->accel_working = true; |
440 | if (r) { | ||
441 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
442 | rdev->accel_working = false; | ||
443 | } | ||
444 | |||
428 | r = r420_startup(rdev); | 445 | r = r420_startup(rdev); |
429 | if (r) { | 446 | if (r) { |
430 | /* Somethings want wront with the accel init stop accel */ | 447 | /* Somethings want wront with the accel init stop accel */ |
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 39b31544c6e7..4ae1615e752f 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -202,9 +202,15 @@ static int r520_startup(struct radeon_device *rdev) | |||
202 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | 202 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
203 | return r; | 203 | return r; |
204 | } | 204 | } |
205 | r = r100_ib_init(rdev); | 205 | |
206 | r = radeon_ib_pool_start(rdev); | ||
207 | if (r) | ||
208 | return r; | ||
209 | |||
210 | r = r100_ib_test(rdev); | ||
206 | if (r) { | 211 | if (r) { |
207 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | 212 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
213 | rdev->accel_working = false; | ||
208 | return r; | 214 | return r; |
209 | } | 215 | } |
210 | return 0; | 216 | return 0; |
@@ -229,6 +235,8 @@ int r520_resume(struct radeon_device *rdev) | |||
229 | rv515_clock_startup(rdev); | 235 | rv515_clock_startup(rdev); |
230 | /* Initialize surface registers */ | 236 | /* Initialize surface registers */ |
231 | radeon_surface_init(rdev); | 237 | radeon_surface_init(rdev); |
238 | |||
239 | rdev->accel_working = true; | ||
232 | return r520_startup(rdev); | 240 | return r520_startup(rdev); |
233 | } | 241 | } |
234 | 242 | ||
@@ -298,7 +306,14 @@ int r520_init(struct radeon_device *rdev) | |||
298 | if (r) | 306 | if (r) |
299 | return r; | 307 | return r; |
300 | rv515_set_safe_registers(rdev); | 308 | rv515_set_safe_registers(rdev); |
309 | |||
310 | r = radeon_ib_pool_init(rdev); | ||
301 | rdev->accel_working = true; | 311 | rdev->accel_working = true; |
312 | if (r) { | ||
313 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
314 | rdev->accel_working = false; | ||
315 | } | ||
316 | |||
302 | r = r520_startup(rdev); | 317 | r = r520_startup(rdev); |
303 | if (r) { | 318 | if (r) { |
304 | /* Somethings want wront with the accel init stop accel */ | 319 | /* Somethings want wront with the accel init stop accel */ |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0f39cc661a7e..951566f1de9e 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2486,6 +2486,17 @@ int r600_startup(struct radeon_device *rdev) | |||
2486 | if (r) | 2486 | if (r) |
2487 | return r; | 2487 | return r; |
2488 | 2488 | ||
2489 | r = radeon_ib_pool_start(rdev); | ||
2490 | if (r) | ||
2491 | return r; | ||
2492 | |||
2493 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
2494 | if (r) { | ||
2495 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
2496 | rdev->accel_working = false; | ||
2497 | return r; | ||
2498 | } | ||
2499 | |||
2489 | return 0; | 2500 | return 0; |
2490 | } | 2501 | } |
2491 | 2502 | ||
@@ -2514,18 +2525,13 @@ int r600_resume(struct radeon_device *rdev) | |||
2514 | /* post card */ | 2525 | /* post card */ |
2515 | atom_asic_init(rdev->mode_info.atom_context); | 2526 | atom_asic_init(rdev->mode_info.atom_context); |
2516 | 2527 | ||
2528 | rdev->accel_working = true; | ||
2517 | r = r600_startup(rdev); | 2529 | r = r600_startup(rdev); |
2518 | if (r) { | 2530 | if (r) { |
2519 | DRM_ERROR("r600 startup failed on resume\n"); | 2531 | DRM_ERROR("r600 startup failed on resume\n"); |
2520 | return r; | 2532 | return r; |
2521 | } | 2533 | } |
2522 | 2534 | ||
2523 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
2524 | if (r) { | ||
2525 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
2526 | return r; | ||
2527 | } | ||
2528 | |||
2529 | r = r600_audio_init(rdev); | 2535 | r = r600_audio_init(rdev); |
2530 | if (r) { | 2536 | if (r) { |
2531 | DRM_ERROR("radeon: audio resume failed\n"); | 2537 | DRM_ERROR("radeon: audio resume failed\n"); |
@@ -2538,13 +2544,14 @@ int r600_resume(struct radeon_device *rdev) | |||
2538 | int r600_suspend(struct radeon_device *rdev) | 2544 | int r600_suspend(struct radeon_device *rdev) |
2539 | { | 2545 | { |
2540 | r600_audio_fini(rdev); | 2546 | r600_audio_fini(rdev); |
2547 | radeon_ib_pool_suspend(rdev); | ||
2548 | r600_blit_suspend(rdev); | ||
2541 | /* FIXME: we should wait for ring to be empty */ | 2549 | /* FIXME: we should wait for ring to be empty */ |
2542 | r600_cp_stop(rdev); | 2550 | r600_cp_stop(rdev); |
2543 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 2551 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
2544 | r600_irq_suspend(rdev); | 2552 | r600_irq_suspend(rdev); |
2545 | radeon_wb_disable(rdev); | 2553 | radeon_wb_disable(rdev); |
2546 | r600_pcie_gart_disable(rdev); | 2554 | r600_pcie_gart_disable(rdev); |
2547 | r600_blit_suspend(rdev); | ||
2548 | 2555 | ||
2549 | return 0; | 2556 | return 0; |
2550 | } | 2557 | } |
@@ -2625,30 +2632,24 @@ int r600_init(struct radeon_device *rdev) | |||
2625 | if (r) | 2632 | if (r) |
2626 | return r; | 2633 | return r; |
2627 | 2634 | ||
2635 | r = radeon_ib_pool_init(rdev); | ||
2628 | rdev->accel_working = true; | 2636 | rdev->accel_working = true; |
2637 | if (r) { | ||
2638 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
2639 | rdev->accel_working = false; | ||
2640 | } | ||
2641 | |||
2629 | r = r600_startup(rdev); | 2642 | r = r600_startup(rdev); |
2630 | if (r) { | 2643 | if (r) { |
2631 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 2644 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
2632 | r600_cp_fini(rdev); | 2645 | r600_cp_fini(rdev); |
2633 | r600_irq_fini(rdev); | 2646 | r600_irq_fini(rdev); |
2634 | radeon_wb_fini(rdev); | 2647 | radeon_wb_fini(rdev); |
2648 | r100_ib_fini(rdev); | ||
2635 | radeon_irq_kms_fini(rdev); | 2649 | radeon_irq_kms_fini(rdev); |
2636 | r600_pcie_gart_fini(rdev); | 2650 | r600_pcie_gart_fini(rdev); |
2637 | rdev->accel_working = false; | 2651 | rdev->accel_working = false; |
2638 | } | 2652 | } |
2639 | if (rdev->accel_working) { | ||
2640 | r = radeon_ib_pool_init(rdev); | ||
2641 | if (r) { | ||
2642 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
2643 | rdev->accel_working = false; | ||
2644 | } else { | ||
2645 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
2646 | if (r) { | ||
2647 | dev_err(rdev->dev, "IB test failed (%d).\n", r); | ||
2648 | rdev->accel_working = false; | ||
2649 | } | ||
2650 | } | ||
2651 | } | ||
2652 | 2653 | ||
2653 | r = r600_audio_init(rdev); | 2654 | r = r600_audio_init(rdev); |
2654 | if (r) | 2655 | if (r) |
@@ -2663,7 +2664,7 @@ void r600_fini(struct radeon_device *rdev) | |||
2663 | r600_cp_fini(rdev); | 2664 | r600_cp_fini(rdev); |
2664 | r600_irq_fini(rdev); | 2665 | r600_irq_fini(rdev); |
2665 | radeon_wb_fini(rdev); | 2666 | radeon_wb_fini(rdev); |
2666 | radeon_ib_pool_fini(rdev); | 2667 | r100_ib_fini(rdev); |
2667 | radeon_irq_kms_fini(rdev); | 2668 | radeon_irq_kms_fini(rdev); |
2668 | r600_pcie_gart_fini(rdev); | 2669 | r600_pcie_gart_fini(rdev); |
2669 | r600_vram_scratch_fini(rdev); | 2670 | r600_vram_scratch_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b4c2d0fe34e3..f29edbf62962 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -316,6 +316,48 @@ struct radeon_bo_list { | |||
316 | u32 tiling_flags; | 316 | u32 tiling_flags; |
317 | }; | 317 | }; |
318 | 318 | ||
319 | /* sub-allocation manager, it has to be protected by another lock. | ||
320 | * By conception this is an helper for other part of the driver | ||
321 | * like the indirect buffer or semaphore, which both have their | ||
322 | * locking. | ||
323 | * | ||
324 | * Principe is simple, we keep a list of sub allocation in offset | ||
325 | * order (first entry has offset == 0, last entry has the highest | ||
326 | * offset). | ||
327 | * | ||
328 | * When allocating new object we first check if there is room at | ||
329 | * the end total_size - (last_object_offset + last_object_size) >= | ||
330 | * alloc_size. If so we allocate new object there. | ||
331 | * | ||
332 | * When there is not enough room at the end, we start waiting for | ||
333 | * each sub object until we reach object_offset+object_size >= | ||
334 | * alloc_size, this object then become the sub object we return. | ||
335 | * | ||
336 | * Alignment can't be bigger than page size. | ||
337 | * | ||
338 | * Hole are not considered for allocation to keep things simple. | ||
339 | * Assumption is that there won't be hole (all object on same | ||
340 | * alignment). | ||
341 | */ | ||
342 | struct radeon_sa_manager { | ||
343 | struct radeon_bo *bo; | ||
344 | struct list_head sa_bo; | ||
345 | unsigned size; | ||
346 | uint64_t gpu_addr; | ||
347 | void *cpu_ptr; | ||
348 | uint32_t domain; | ||
349 | }; | ||
350 | |||
351 | struct radeon_sa_bo; | ||
352 | |||
353 | /* sub-allocation buffer */ | ||
354 | struct radeon_sa_bo { | ||
355 | struct list_head list; | ||
356 | struct radeon_sa_manager *manager; | ||
357 | unsigned offset; | ||
358 | unsigned size; | ||
359 | }; | ||
360 | |||
319 | /* | 361 | /* |
320 | * GEM objects. | 362 | * GEM objects. |
321 | */ | 363 | */ |
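The comment block added above describes the whole allocation policy: sub-allocations are kept sorted by offset, a new request first looks for room in front of an existing entry and otherwise goes at the tail; when nothing fits, the real code waits for older sub-allocations (and their fences) to retire. A minimal user-space sketch of that policy, not part of the patch and purely illustrative (the array bound, sizes and names are made up; alignment handling and fence waiting are omitted):

#include <stdio.h>

#define POOL_SIZE 4096
#define MAX_OBJS  8

struct obj { unsigned offset, size; };

static struct obj objs[MAX_OBJS];	/* kept sorted by offset */
static unsigned nobjs;

/* returns the allocated offset, or -1 when no room is left */
static int sa_alloc(unsigned size)
{
	unsigned offset = 0, i, j;

	if (nobjs == MAX_OBJS)
		return -1;
	/* is there a hole in front of an existing object ? */
	for (i = 0; i < nobjs; i++) {
		if (objs[i].offset - offset >= size)
			break;
		offset = objs[i].offset + objs[i].size;
	}
	/* otherwise check the room left at the tail of the pool */
	if (i == nobjs && POOL_SIZE - offset < size)
		return -1;
	/* insert at position i, keeping the list sorted by offset */
	for (j = nobjs; j > i; j--)
		objs[j] = objs[j - 1];
	objs[i].offset = offset;
	objs[i].size = size;
	nobjs++;
	return (int)offset;
}

int main(void)
{
	int a = sa_alloc(1024);
	int b = sa_alloc(2048);
	int c = sa_alloc(2048);

	printf("%d %d %d\n", a, b, c);	/* prints "0 1024 -1": the third request no longer fits */
	return 0;
}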
@@ -503,13 +545,12 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); | |||
503 | */ | 545 | */ |
504 | 546 | ||
505 | struct radeon_ib { | 547 | struct radeon_ib { |
506 | struct list_head list; | 548 | struct radeon_sa_bo sa_bo; |
507 | unsigned idx; | 549 | unsigned idx; |
550 | uint32_t length_dw; | ||
508 | uint64_t gpu_addr; | 551 | uint64_t gpu_addr; |
509 | struct radeon_fence *fence; | ||
510 | uint32_t *ptr; | 552 | uint32_t *ptr; |
511 | uint32_t length_dw; | 553 | struct radeon_fence *fence; |
512 | bool free; | ||
513 | }; | 554 | }; |
514 | 555 | ||
515 | /* | 556 | /* |
@@ -517,12 +558,11 @@ struct radeon_ib { | |||
517 | * mutex protects scheduled_ibs, ready, alloc_bm | 558 | * mutex protects scheduled_ibs, ready, alloc_bm |
518 | */ | 559 | */ |
519 | struct radeon_ib_pool { | 560 | struct radeon_ib_pool { |
520 | struct mutex mutex; | 561 | struct mutex mutex; |
521 | struct radeon_bo *robj; | 562 | struct radeon_sa_manager sa_manager; |
522 | struct list_head bogus_ib; | 563 | struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; |
523 | struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; | 564 | bool ready; |
524 | bool ready; | 565 | unsigned head_id; |
525 | unsigned head_id; | ||
526 | }; | 566 | }; |
527 | 567 | ||
528 | struct radeon_ring { | 568 | struct radeon_ring { |
@@ -603,8 +643,9 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); | |||
603 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); | 643 | int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); |
604 | int radeon_ib_pool_init(struct radeon_device *rdev); | 644 | int radeon_ib_pool_init(struct radeon_device *rdev); |
605 | void radeon_ib_pool_fini(struct radeon_device *rdev); | 645 | void radeon_ib_pool_fini(struct radeon_device *rdev); |
646 | int radeon_ib_pool_start(struct radeon_device *rdev); | ||
647 | int radeon_ib_pool_suspend(struct radeon_device *rdev); | ||
606 | int radeon_ib_test(struct radeon_device *rdev); | 648 | int radeon_ib_test(struct radeon_device *rdev); |
607 | extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); | ||
608 | /* Ring access between begin & end cannot sleep */ | 649 | /* Ring access between begin & end cannot sleep */ |
609 | int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp); | 650 | int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp); |
610 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); | 651 | void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index f0bab7878069..c002ed1c4483 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -109,7 +109,7 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, | |||
109 | struct r100_gpu_lockup *lockup, | 109 | struct r100_gpu_lockup *lockup, |
110 | struct radeon_ring *cp); | 110 | struct radeon_ring *cp); |
111 | void r100_ib_fini(struct radeon_device *rdev); | 111 | void r100_ib_fini(struct radeon_device *rdev); |
112 | int r100_ib_init(struct radeon_device *rdev); | 112 | int r100_ib_test(struct radeon_device *rdev); |
113 | void r100_irq_disable(struct radeon_device *rdev); | 113 | void r100_irq_disable(struct radeon_device *rdev); |
114 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); | 114 | void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); |
115 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); | 115 | void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); |
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index b07f0f9b8627..cc236fb128ae 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -128,4 +128,24 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, | |||
128 | struct ttm_mem_reg *mem); | 128 | struct ttm_mem_reg *mem); |
129 | extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); | 129 | extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); |
130 | extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); | 130 | extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); |
131 | |||
132 | /* | ||
133 | * sub allocation | ||
134 | */ | ||
135 | extern int radeon_sa_bo_manager_init(struct radeon_device *rdev, | ||
136 | struct radeon_sa_manager *sa_manager, | ||
137 | unsigned size, u32 domain); | ||
138 | extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev, | ||
139 | struct radeon_sa_manager *sa_manager); | ||
140 | extern int radeon_sa_bo_manager_start(struct radeon_device *rdev, | ||
141 | struct radeon_sa_manager *sa_manager); | ||
142 | extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev, | ||
143 | struct radeon_sa_manager *sa_manager); | ||
144 | extern int radeon_sa_bo_new(struct radeon_device *rdev, | ||
145 | struct radeon_sa_manager *sa_manager, | ||
146 | struct radeon_sa_bo *sa_bo, | ||
147 | unsigned size, unsigned align); | ||
148 | extern void radeon_sa_bo_free(struct radeon_device *rdev, | ||
149 | struct radeon_sa_bo *sa_bo); | ||
150 | |||
131 | #endif | 151 | #endif |
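The prototypes added above form the complete sub-allocator API. A hedged sketch, not taken from the patch, of how a caller inside the driver might drive it (the pool size, alignment and the example_* name are invented for illustration; error handling is reduced to early exits):

#include "radeon.h"
#include "radeon_object.h"

static int example_sa_usage(struct radeon_device *rdev)
{
	struct radeon_sa_manager manager;
	struct radeon_sa_bo sa_bo;
	int r;

	/* one backing BO for the whole pool, here 64 KiB in GTT */
	r = radeon_sa_bo_manager_init(rdev, &manager, 64 * 1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r)
		return r;

	/* pin and map the backing BO so gpu_addr/cpu_ptr become valid */
	r = radeon_sa_bo_manager_start(rdev, &manager);
	if (r)
		goto out_fini;

	/* carve a 4 KiB, 256-byte aligned chunk out of the pool */
	r = radeon_sa_bo_new(rdev, &manager, &sa_bo, 4096, 256);
	if (!r) {
		void *cpu = (char *)manager.cpu_ptr + sa_bo.offset;
		uint64_t gpu = manager.gpu_addr + sa_bo.offset;

		/* ... fill 'cpu', hand 'gpu' to the hardware ... */
		(void)cpu;
		(void)gpu;
		radeon_sa_bo_free(rdev, &sa_bo);
	}

	radeon_sa_bo_manager_suspend(rdev, &manager);
out_fini:
	radeon_sa_bo_manager_fini(rdev, &manager);
	return r;
}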
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 133e2636cea0..f6a4fbd102a0 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -74,92 +74,90 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v) | |||
74 | ring->ring_free_dw--; | 74 | ring->ring_free_dw--; |
75 | } | 75 | } |
76 | 76 | ||
77 | void radeon_ib_bogus_cleanup(struct radeon_device *rdev) | 77 | /* |
78 | { | 78 | * IB. |
79 | struct radeon_ib *ib, *n; | 79 | */ |
80 | 80 | static bool radeon_ib_try_free(struct radeon_device *rdev, | |
81 | list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) { | 81 | struct radeon_ib *ib) |
82 | list_del(&ib->list); | ||
83 | vfree(ib->ptr); | ||
84 | kfree(ib); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib) | ||
89 | { | 82 | { |
90 | struct radeon_ib *bib; | 83 | bool done = false; |
91 | 84 | ||
92 | bib = kmalloc(sizeof(*bib), GFP_KERNEL); | 85 | /* only free ib which have been emited */ |
93 | if (bib == NULL) | 86 | if (ib->fence && ib->fence->emitted) { |
94 | return; | 87 | if (radeon_fence_signaled(ib->fence)) { |
95 | bib->ptr = vmalloc(ib->length_dw * 4); | 88 | radeon_fence_unref(&ib->fence); |
96 | if (bib->ptr == NULL) { | 89 | radeon_sa_bo_free(rdev, &ib->sa_bo); |
97 | kfree(bib); | 90 | done = true; |
98 | return; | 91 | } |
99 | } | 92 | } |
100 | memcpy(bib->ptr, ib->ptr, ib->length_dw * 4); | 93 | return done; |
101 | bib->length_dw = ib->length_dw; | ||
102 | mutex_lock(&rdev->ib_pool.mutex); | ||
103 | list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib); | ||
104 | mutex_unlock(&rdev->ib_pool.mutex); | ||
105 | } | 94 | } |
106 | 95 | ||
107 | /* | ||
108 | * IB. | ||
109 | */ | ||
110 | int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib) | 96 | int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib) |
111 | { | 97 | { |
112 | struct radeon_fence *fence; | 98 | struct radeon_fence *fence; |
113 | struct radeon_ib *nib; | 99 | unsigned cretry = 0; |
114 | int r = 0, i, c; | 100 | int r = 0, i, idx; |
115 | 101 | ||
116 | *ib = NULL; | 102 | *ib = NULL; |
103 | |||
117 | r = radeon_fence_create(rdev, &fence, ring); | 104 | r = radeon_fence_create(rdev, &fence, ring); |
118 | if (r) { | 105 | if (r) { |
119 | dev_err(rdev->dev, "failed to create fence for new IB\n"); | 106 | dev_err(rdev->dev, "failed to create fence for new IB\n"); |
120 | return r; | 107 | return r; |
121 | } | 108 | } |
109 | |||
122 | mutex_lock(&rdev->ib_pool.mutex); | 110 | mutex_lock(&rdev->ib_pool.mutex); |
123 | for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) { | 111 | idx = rdev->ib_pool.head_id; |
124 | i &= (RADEON_IB_POOL_SIZE - 1); | 112 | retry: |
125 | if (rdev->ib_pool.ibs[i].free) { | 113 | if (cretry > 5) { |
126 | nib = &rdev->ib_pool.ibs[i]; | 114 | dev_err(rdev->dev, "failed to get an ib after 5 retry\n"); |
127 | break; | ||
128 | } | ||
129 | } | ||
130 | if (nib == NULL) { | ||
131 | /* This should never happen, it means we allocated all | ||
132 | * IB and haven't scheduled one yet, return EBUSY to | ||
133 | * userspace hoping that on ioctl recall we get better | ||
134 | * luck | ||
135 | */ | ||
136 | dev_err(rdev->dev, "no free indirect buffer !\n"); | ||
137 | mutex_unlock(&rdev->ib_pool.mutex); | 115 | mutex_unlock(&rdev->ib_pool.mutex); |
138 | radeon_fence_unref(&fence); | 116 | radeon_fence_unref(&fence); |
139 | return -EBUSY; | 117 | return -ENOMEM; |
140 | } | 118 | } |
141 | rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1); | 119 | cretry++; |
142 | nib->free = false; | 120 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { |
143 | if (nib->fence) { | 121 | radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]); |
144 | mutex_unlock(&rdev->ib_pool.mutex); | 122 | if (rdev->ib_pool.ibs[idx].fence == NULL) { |
145 | r = radeon_fence_wait(nib->fence, false); | 123 | r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager, |
146 | if (r) { | 124 | &rdev->ib_pool.ibs[idx].sa_bo, |
147 | dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n", | 125 | 64*1024, 64); |
148 | nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw); | 126 | if (!r) { |
149 | mutex_lock(&rdev->ib_pool.mutex); | 127 | *ib = &rdev->ib_pool.ibs[idx]; |
150 | nib->free = true; | 128 | (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr; |
151 | mutex_unlock(&rdev->ib_pool.mutex); | 129 | (*ib)->ptr += ((*ib)->sa_bo.offset >> 2); |
152 | radeon_fence_unref(&fence); | 130 | (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr; |
153 | return r; | 131 | (*ib)->gpu_addr += (*ib)->sa_bo.offset; |
132 | (*ib)->fence = fence; | ||
133 | /* ib are most likely to be allocated in a ring fashion | ||
134 | * thus rdev->ib_pool.head_id should be the id of the | ||
135 | * oldest ib | ||
136 | */ | ||
137 | rdev->ib_pool.head_id = (1 + idx); | ||
138 | rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1); | ||
139 | mutex_unlock(&rdev->ib_pool.mutex); | ||
140 | return 0; | ||
141 | } | ||
154 | } | 142 | } |
155 | mutex_lock(&rdev->ib_pool.mutex); | 143 | idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1); |
144 | } | ||
145 | /* this should be rare event, ie all ib scheduled none signaled yet. | ||
146 | */ | ||
147 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { | ||
148 | if (rdev->ib_pool.ibs[idx].fence) { | ||
149 | r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false); | ||
150 | if (!r) { | ||
151 | goto retry; | ||
152 | } | ||
153 | /* an error happened */ | ||
154 | break; | ||
155 | } | ||
156 | idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1); | ||
156 | } | 157 | } |
157 | radeon_fence_unref(&nib->fence); | ||
158 | nib->fence = fence; | ||
159 | nib->length_dw = 0; | ||
160 | mutex_unlock(&rdev->ib_pool.mutex); | 158 | mutex_unlock(&rdev->ib_pool.mutex); |
161 | *ib = nib; | 159 | radeon_fence_unref(&fence); |
162 | return 0; | 160 | return r; |
163 | } | 161 | } |
164 | 162 | ||
165 | void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) | 163 | void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) |
@@ -170,10 +168,11 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) | |||
170 | if (tmp == NULL) { | 168 | if (tmp == NULL) { |
171 | return; | 169 | return; |
172 | } | 170 | } |
173 | if (!tmp->fence->emitted) | ||
174 | radeon_fence_unref(&tmp->fence); | ||
175 | mutex_lock(&rdev->ib_pool.mutex); | 171 | mutex_lock(&rdev->ib_pool.mutex); |
176 | tmp->free = true; | 172 | if (tmp->fence && !tmp->fence->emitted) { |
173 | radeon_sa_bo_free(rdev, &tmp->sa_bo); | ||
174 | radeon_fence_unref(&tmp->fence); | ||
175 | } | ||
177 | mutex_unlock(&rdev->ib_pool.mutex); | 176 | mutex_unlock(&rdev->ib_pool.mutex); |
178 | } | 177 | } |
179 | 178 | ||
@@ -196,94 +195,73 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) | |||
196 | } | 195 | } |
197 | radeon_ring_ib_execute(rdev, ib->fence->ring, ib); | 196 | radeon_ring_ib_execute(rdev, ib->fence->ring, ib); |
198 | radeon_fence_emit(rdev, ib->fence); | 197 | radeon_fence_emit(rdev, ib->fence); |
199 | mutex_lock(&rdev->ib_pool.mutex); | ||
200 | /* once scheduled IB is considered free and protected by the fence */ | ||
201 | ib->free = true; | ||
202 | mutex_unlock(&rdev->ib_pool.mutex); | ||
203 | radeon_ring_unlock_commit(rdev, ring); | 198 | radeon_ring_unlock_commit(rdev, ring); |
204 | return 0; | 199 | return 0; |
205 | } | 200 | } |
206 | 201 | ||
207 | int radeon_ib_pool_init(struct radeon_device *rdev) | 202 | int radeon_ib_pool_init(struct radeon_device *rdev) |
208 | { | 203 | { |
209 | void *ptr; | 204 | int i, r; |
210 | uint64_t gpu_addr; | ||
211 | int i; | ||
212 | int r = 0; | ||
213 | 205 | ||
214 | if (rdev->ib_pool.robj) | 206 | mutex_lock(&rdev->ib_pool.mutex); |
207 | if (rdev->ib_pool.ready) { | ||
208 | mutex_unlock(&rdev->ib_pool.mutex); | ||
215 | return 0; | 209 | return 0; |
216 | INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); | ||
217 | /* Allocate 1M object buffer */ | ||
218 | r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024, | ||
219 | PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, | ||
220 | &rdev->ib_pool.robj); | ||
221 | if (r) { | ||
222 | DRM_ERROR("radeon: failed to ib pool (%d).\n", r); | ||
223 | return r; | ||
224 | } | 210 | } |
225 | r = radeon_bo_reserve(rdev->ib_pool.robj, false); | 211 | |
226 | if (unlikely(r != 0)) | 212 | r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager, |
227 | return r; | 213 | RADEON_IB_POOL_SIZE*64*1024, |
228 | r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr); | 214 | RADEON_GEM_DOMAIN_GTT); |
229 | if (r) { | ||
230 | radeon_bo_unreserve(rdev->ib_pool.robj); | ||
231 | DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r); | ||
232 | return r; | ||
233 | } | ||
234 | r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr); | ||
235 | radeon_bo_unreserve(rdev->ib_pool.robj); | ||
236 | if (r) { | 215 | if (r) { |
237 | DRM_ERROR("radeon: failed to map ib pool (%d).\n", r); | 216 | mutex_unlock(&rdev->ib_pool.mutex); |
238 | return r; | 217 | return r; |
239 | } | 218 | } |
240 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { | ||
241 | unsigned offset; | ||
242 | 219 | ||
243 | offset = i * 64 * 1024; | 220 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { |
244 | rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset; | 221 | rdev->ib_pool.ibs[i].fence = NULL; |
245 | rdev->ib_pool.ibs[i].ptr = ptr + offset; | ||
246 | rdev->ib_pool.ibs[i].idx = i; | 222 | rdev->ib_pool.ibs[i].idx = i; |
247 | rdev->ib_pool.ibs[i].length_dw = 0; | 223 | rdev->ib_pool.ibs[i].length_dw = 0; |
248 | rdev->ib_pool.ibs[i].free = true; | 224 | INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list); |
249 | } | 225 | } |
250 | rdev->ib_pool.head_id = 0; | 226 | rdev->ib_pool.head_id = 0; |
251 | rdev->ib_pool.ready = true; | 227 | rdev->ib_pool.ready = true; |
252 | DRM_INFO("radeon: ib pool ready.\n"); | 228 | DRM_INFO("radeon: ib pool ready.\n"); |
229 | |||
253 | if (radeon_debugfs_ib_init(rdev)) { | 230 | if (radeon_debugfs_ib_init(rdev)) { |
254 | DRM_ERROR("Failed to register debugfs file for IB !\n"); | 231 | DRM_ERROR("Failed to register debugfs file for IB !\n"); |
255 | } | 232 | } |
256 | if (radeon_debugfs_ring_init(rdev)) { | 233 | if (radeon_debugfs_ring_init(rdev)) { |
257 | DRM_ERROR("Failed to register debugfs file for rings !\n"); | 234 | DRM_ERROR("Failed to register debugfs file for rings !\n"); |
258 | } | 235 | } |
259 | return r; | 236 | mutex_unlock(&rdev->ib_pool.mutex); |
237 | return 0; | ||
260 | } | 238 | } |
261 | 239 | ||
262 | void radeon_ib_pool_fini(struct radeon_device *rdev) | 240 | void radeon_ib_pool_fini(struct radeon_device *rdev) |
263 | { | 241 | { |
264 | int r; | 242 | unsigned i; |
265 | struct radeon_bo *robj; | ||
266 | 243 | ||
267 | if (!rdev->ib_pool.ready) { | ||
268 | return; | ||
269 | } | ||
270 | mutex_lock(&rdev->ib_pool.mutex); | 244 | mutex_lock(&rdev->ib_pool.mutex); |
271 | radeon_ib_bogus_cleanup(rdev); | 245 | if (rdev->ib_pool.ready) { |
272 | robj = rdev->ib_pool.robj; | 246 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { |
273 | rdev->ib_pool.robj = NULL; | 247 | radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo); |
274 | mutex_unlock(&rdev->ib_pool.mutex); | 248 | radeon_fence_unref(&rdev->ib_pool.ibs[i].fence); |
275 | |||
276 | if (robj) { | ||
277 | r = radeon_bo_reserve(robj, false); | ||
278 | if (likely(r == 0)) { | ||
279 | radeon_bo_kunmap(robj); | ||
280 | radeon_bo_unpin(robj); | ||
281 | radeon_bo_unreserve(robj); | ||
282 | } | 249 | } |
283 | radeon_bo_unref(&robj); | 250 | radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager); |
251 | rdev->ib_pool.ready = false; | ||
284 | } | 252 | } |
253 | mutex_unlock(&rdev->ib_pool.mutex); | ||
285 | } | 254 | } |
286 | 255 | ||
256 | int radeon_ib_pool_start(struct radeon_device *rdev) | ||
257 | { | ||
258 | return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager); | ||
259 | } | ||
260 | |||
261 | int radeon_ib_pool_suspend(struct radeon_device *rdev) | ||
262 | { | ||
263 | return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager); | ||
264 | } | ||
287 | 265 | ||
288 | /* | 266 | /* |
289 | * Ring. | 267 | * Ring. |
@@ -509,37 +487,8 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data) | |||
509 | return 0; | 487 | return 0; |
510 | } | 488 | } |
511 | 489 | ||
512 | static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data) | ||
513 | { | ||
514 | struct drm_info_node *node = (struct drm_info_node *) m->private; | ||
515 | struct radeon_device *rdev = node->info_ent->data; | ||
516 | struct radeon_ib *ib; | ||
517 | unsigned i; | ||
518 | |||
519 | mutex_lock(&rdev->ib_pool.mutex); | ||
520 | if (list_empty(&rdev->ib_pool.bogus_ib)) { | ||
521 | mutex_unlock(&rdev->ib_pool.mutex); | ||
522 | seq_printf(m, "no bogus IB recorded\n"); | ||
523 | return 0; | ||
524 | } | ||
525 | ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list); | ||
526 | list_del_init(&ib->list); | ||
527 | mutex_unlock(&rdev->ib_pool.mutex); | ||
528 | seq_printf(m, "IB size %05u dwords\n", ib->length_dw); | ||
529 | for (i = 0; i < ib->length_dw; i++) { | ||
530 | seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]); | ||
531 | } | ||
532 | vfree(ib->ptr); | ||
533 | kfree(ib); | ||
534 | return 0; | ||
535 | } | ||
536 | |||
537 | static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; | 490 | static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; |
538 | static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; | 491 | static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; |
539 | |||
540 | static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = { | ||
541 | {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL}, | ||
542 | }; | ||
543 | #endif | 492 | #endif |
544 | 493 | ||
545 | int radeon_debugfs_ring_init(struct radeon_device *rdev) | 494 | int radeon_debugfs_ring_init(struct radeon_device *rdev) |
@@ -556,12 +505,7 @@ int radeon_debugfs_ib_init(struct radeon_device *rdev) | |||
556 | { | 505 | { |
557 | #if defined(CONFIG_DEBUG_FS) | 506 | #if defined(CONFIG_DEBUG_FS) |
558 | unsigned i; | 507 | unsigned i; |
559 | int r; | ||
560 | 508 | ||
561 | radeon_debugfs_ib_bogus_info_list[0].data = rdev; | ||
562 | r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1); | ||
563 | if (r) | ||
564 | return r; | ||
565 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { | 509 | for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { |
566 | sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i); | 510 | sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i); |
567 | radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i]; | 511 | radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i]; |
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
new file mode 100644
index 000000000000..4cce47e7dc0d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -0,0 +1,189 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Red Hat Inc. | ||
3 | * All Rights Reserved. | ||
4 | * | ||
5 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
6 | * copy of this software and associated documentation files (the | ||
7 | * "Software"), to deal in the Software without restriction, including | ||
8 | * without limitation the rights to use, copy, modify, merge, publish, | ||
9 | * distribute, sub license, and/or sell copies of the Software, and to | ||
10 | * permit persons to whom the Software is furnished to do so, subject to | ||
11 | * the following conditions: | ||
12 | * | ||
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
20 | * | ||
21 | * The above copyright notice and this permission notice (including the | ||
22 | * next paragraph) shall be included in all copies or substantial portions | ||
23 | * of the Software. | ||
24 | * | ||
25 | */ | ||
26 | /* | ||
27 | * Authors: | ||
28 | * Jerome Glisse <glisse@freedesktop.org> | ||
29 | */ | ||
30 | #include "drmP.h" | ||
31 | #include "drm.h" | ||
32 | #include "radeon.h" | ||
33 | |||
34 | int radeon_sa_bo_manager_init(struct radeon_device *rdev, | ||
35 | struct radeon_sa_manager *sa_manager, | ||
36 | unsigned size, u32 domain) | ||
37 | { | ||
38 | int r; | ||
39 | |||
40 | sa_manager->bo = NULL; | ||
41 | sa_manager->size = size; | ||
42 | sa_manager->domain = domain; | ||
43 | INIT_LIST_HEAD(&sa_manager->sa_bo); | ||
44 | |||
45 | r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true, | ||
46 | RADEON_GEM_DOMAIN_CPU, &sa_manager->bo); | ||
47 | if (r) { | ||
48 | dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r); | ||
49 | return r; | ||
50 | } | ||
51 | |||
52 | return r; | ||
53 | } | ||
54 | |||
55 | void radeon_sa_bo_manager_fini(struct radeon_device *rdev, | ||
56 | struct radeon_sa_manager *sa_manager) | ||
57 | { | ||
58 | struct radeon_sa_bo *sa_bo, *tmp; | ||
59 | |||
60 | if (!list_empty(&sa_manager->sa_bo)) { | ||
61 | dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n"); | ||
62 | } | ||
63 | list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) { | ||
64 | list_del_init(&sa_bo->list); | ||
65 | } | ||
66 | radeon_bo_unref(&sa_manager->bo); | ||
67 | sa_manager->size = 0; | ||
68 | } | ||
69 | |||
70 | int radeon_sa_bo_manager_start(struct radeon_device *rdev, | ||
71 | struct radeon_sa_manager *sa_manager) | ||
72 | { | ||
73 | int r; | ||
74 | |||
75 | if (sa_manager->bo == NULL) { | ||
76 | dev_err(rdev->dev, "no bo for sa manager\n"); | ||
77 | return -EINVAL; | ||
78 | } | ||
79 | |||
80 | /* map the buffer */ | ||
81 | r = radeon_bo_reserve(sa_manager->bo, false); | ||
82 | if (r) { | ||
83 | dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r); | ||
84 | return r; | ||
85 | } | ||
86 | r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr); | ||
87 | if (r) { | ||
88 | radeon_bo_unreserve(sa_manager->bo); | ||
89 | dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r); | ||
90 | return r; | ||
91 | } | ||
92 | r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); | ||
93 | radeon_bo_unreserve(sa_manager->bo); | ||
94 | return r; | ||
95 | } | ||
96 | |||
97 | int radeon_sa_bo_manager_suspend(struct radeon_device *rdev, | ||
98 | struct radeon_sa_manager *sa_manager) | ||
99 | { | ||
100 | int r; | ||
101 | |||
102 | if (sa_manager->bo == NULL) { | ||
103 | dev_err(rdev->dev, "no bo for sa manager\n"); | ||
104 | return -EINVAL; | ||
105 | } | ||
106 | |||
107 | r = radeon_bo_reserve(sa_manager->bo, false); | ||
108 | if (!r) { | ||
109 | radeon_bo_kunmap(sa_manager->bo); | ||
110 | radeon_bo_unpin(sa_manager->bo); | ||
111 | radeon_bo_unreserve(sa_manager->bo); | ||
112 | } | ||
113 | return r; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * Principe is simple, we keep a list of sub allocation in offset | ||
118 | * order (first entry has offset == 0, last entry has the highest | ||
119 | * offset). | ||
120 | * | ||
121 | * When allocating new object we first check if there is room at | ||
122 | * the end total_size - (last_object_offset + last_object_size) >= | ||
123 | * alloc_size. If so we allocate new object there. | ||
124 | * | ||
125 | * When there is not enough room at the end, we start waiting for | ||
126 | * each sub object until we reach object_offset+object_size >= | ||
127 | * alloc_size, this object then become the sub object we return. | ||
128 | * | ||
129 | * Alignment can't be bigger than page size | ||
130 | */ | ||
131 | int radeon_sa_bo_new(struct radeon_device *rdev, | ||
132 | struct radeon_sa_manager *sa_manager, | ||
133 | struct radeon_sa_bo *sa_bo, | ||
134 | unsigned size, unsigned align) | ||
135 | { | ||
136 | struct radeon_sa_bo *tmp; | ||
137 | struct list_head *head; | ||
138 | unsigned offset = 0, wasted = 0; | ||
139 | |||
140 | BUG_ON(align > RADEON_GPU_PAGE_SIZE); | ||
141 | BUG_ON(size > sa_manager->size); | ||
142 | |||
143 | /* no one ? */ | ||
144 | head = sa_manager->sa_bo.prev; | ||
145 | if (list_empty(&sa_manager->sa_bo)) { | ||
146 | goto out; | ||
147 | } | ||
148 | |||
149 | /* look for a hole big enough */ | ||
150 | offset = 0; | ||
151 | list_for_each_entry(tmp, &sa_manager->sa_bo, list) { | ||
152 | /* room before this object ? */ | ||
153 | if ((tmp->offset - offset) >= size) { | ||
154 | head = tmp->list.prev; | ||
155 | goto out; | ||
156 | } | ||
157 | offset = tmp->offset + tmp->size; | ||
158 | wasted = offset % align; | ||
159 | if (wasted) { | ||
160 | wasted = align - wasted; | ||
161 | } | ||
162 | offset += wasted; | ||
163 | } | ||
164 | /* room at the end ? */ | ||
165 | head = sa_manager->sa_bo.prev; | ||
166 | tmp = list_entry(head, struct radeon_sa_bo, list); | ||
167 | offset = tmp->offset + tmp->size; | ||
168 | wasted = offset % align; | ||
169 | if (wasted) { | ||
170 | wasted = align - wasted; | ||
171 | } | ||
172 | offset += wasted; | ||
173 | if ((sa_manager->size - offset) < size) { | ||
174 | /* failed to find somethings big enough */ | ||
175 | return -ENOMEM; | ||
176 | } | ||
177 | |||
178 | out: | ||
179 | sa_bo->manager = sa_manager; | ||
180 | sa_bo->offset = offset; | ||
181 | sa_bo->size = size; | ||
182 | list_add(&sa_bo->list, head); | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo) | ||
187 | { | ||
188 | list_del_init(&sa_bo->list); | ||
189 | } | ||
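
Editor's note: the comment block above describes a plain first-fit scheme -- sub allocations sit on a list sorted by offset, a new object goes into the first hole that fits (or at the end), and freeing simply unlinks the entry. For readers who want to try the idea outside the kernel, here is a minimal user-space sketch of the same walk. sa_pool, sa_node, align_up(), sa_alloc() and sa_free() are illustrative names, not radeon symbols, and align is assumed to be non-zero.

#include <stddef.h>
#include <stdlib.h>

struct sa_node {
	size_t offset;
	size_t size;
	struct sa_node *next;	/* list kept sorted by offset */
};

struct sa_pool {
	size_t size;		/* total size managed by the pool */
	struct sa_node *head;
};

static size_t align_up(size_t v, size_t align)
{
	size_t rem = v % align;

	return rem ? v + (align - rem) : v;
}

/* Returns the allocated offset, or (size_t)-1 when no hole is big enough. */
static size_t sa_alloc(struct sa_pool *pool, size_t size, size_t align)
{
	struct sa_node **link = &pool->head;
	struct sa_node *n;
	size_t offset = 0;

	for (; *link; link = &(*link)->next) {
		/* hole before this node big enough ? */
		if ((*link)->offset >= offset + size)
			break;
		offset = align_up((*link)->offset + (*link)->size, align);
	}
	/* no hole found between nodes, is there room at the end ? */
	if (!*link && offset + size > pool->size)
		return (size_t)-1;

	n = malloc(sizeof(*n));
	if (!n)
		return (size_t)-1;
	n->offset = offset;
	n->size = size;
	n->next = *link;	/* insert in front of the node after the hole */
	*link = n;
	return offset;
}

/* Freeing just unlinks the node; the hole is reusable immediately. */
static void sa_free(struct sa_pool *pool, size_t offset)
{
	struct sa_node **link = &pool->head;

	while (*link && (*link)->offset != offset)
		link = &(*link)->next;
	if (*link) {
		struct sa_node *n = *link;

		*link = n->next;
		free(n);
	}
}

int main(void)
{
	struct sa_pool pool = { 4096, NULL };
	size_t a = sa_alloc(&pool, 256, 16);
	size_t b = sa_alloc(&pool, 512, 16);

	sa_free(&pool, a);		/* opens a hole at the front of the pool */
	a = sa_alloc(&pool, 128, 16);	/* first-fit reuses that hole: offset 0 */
	(void)b;
	return a == 0 ? 0 : 1;
}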
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 5c86d5161fdb..b0ce84a20a68 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c | |||
@@ -425,11 +425,18 @@ static int rs400_startup(struct radeon_device *rdev) | |||
425 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | 425 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
426 | return r; | 426 | return r; |
427 | } | 427 | } |
428 | r = r100_ib_init(rdev); | 428 | |
429 | r = radeon_ib_pool_start(rdev); | ||
430 | if (r) | ||
431 | return r; | ||
432 | |||
433 | r = r100_ib_test(rdev); | ||
429 | if (r) { | 434 | if (r) { |
430 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | 435 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
436 | rdev->accel_working = false; | ||
431 | return r; | 437 | return r; |
432 | } | 438 | } |
439 | |||
433 | return 0; | 440 | return 0; |
434 | } | 441 | } |
435 | 442 | ||
@@ -453,11 +460,14 @@ int rs400_resume(struct radeon_device *rdev) | |||
453 | r300_clock_startup(rdev); | 460 | r300_clock_startup(rdev); |
454 | /* Initialize surface registers */ | 461 | /* Initialize surface registers */ |
455 | radeon_surface_init(rdev); | 462 | radeon_surface_init(rdev); |
463 | |||
464 | rdev->accel_working = true; | ||
456 | return rs400_startup(rdev); | 465 | return rs400_startup(rdev); |
457 | } | 466 | } |
458 | 467 | ||
459 | int rs400_suspend(struct radeon_device *rdev) | 468 | int rs400_suspend(struct radeon_device *rdev) |
460 | { | 469 | { |
470 | radeon_ib_pool_suspend(rdev); | ||
461 | r100_cp_disable(rdev); | 471 | r100_cp_disable(rdev); |
462 | radeon_wb_disable(rdev); | 472 | radeon_wb_disable(rdev); |
463 | r100_irq_disable(rdev); | 473 | r100_irq_disable(rdev); |
@@ -536,7 +546,14 @@ int rs400_init(struct radeon_device *rdev) | |||
536 | if (r) | 546 | if (r) |
537 | return r; | 547 | return r; |
538 | r300_set_reg_safe(rdev); | 548 | r300_set_reg_safe(rdev); |
549 | |||
550 | r = radeon_ib_pool_init(rdev); | ||
539 | rdev->accel_working = true; | 551 | rdev->accel_working = true; |
552 | if (r) { | ||
553 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
554 | rdev->accel_working = false; | ||
555 | } | ||
556 | |||
540 | r = rs400_startup(rdev); | 557 | r = rs400_startup(rdev); |
541 | if (r) { | 558 | if (r) { |
542 | /* Something went wrong with the accel init, stop accel */ | 559 | /* Something went wrong with the accel init, stop accel */ |
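
Editor's note: the same reordering repeats for every ASIC file below (rs600, rs690, rv515, rv770) and in the evergreen/r600 hunks earlier in the patch: the IB pool is allocated once in *_init(), while *_startup() starts the pool and runs the IB test, so first boot and resume exercise the identical path. A self-contained sketch of that ordering follows; struct dev, asic_init(), asic_startup() and the stub helpers are hypothetical stand-ins, not radeon symbols.

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool accel_working;
};

/* Stubs standing in for the IB pool and IB test helpers. */
static int ib_pool_init(struct dev *d)  { (void)d; return 0; }
static int ib_pool_start(struct dev *d) { (void)d; return 0; }
static int ib_test(struct dev *d)       { (void)d; return 0; }

/* startup: bring the pool up and prove it works; shared by init and resume. */
static int asic_startup(struct dev *d)
{
	int r;

	/* ... GART, CP and IRQ bring-up elided ... */
	r = ib_pool_start(d);
	if (r)
		return r;

	r = ib_test(d);
	if (r) {
		fprintf(stderr, "failed testing IB (%d)\n", r);
		d->accel_working = false;
		return r;
	}
	return 0;
}

/* init: allocate the pool once; startup (and later resume) reuses it. */
static int asic_init(struct dev *d)
{
	int r = ib_pool_init(d);

	d->accel_working = true;
	if (r) {
		fprintf(stderr, "IB initialization failed (%d)\n", r);
		d->accel_working = false;
	}
	return asic_startup(d);
}

int main(void)
{
	struct dev d = { false };

	return asic_init(&d);
}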
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 8a52cf007ff0..ca6d5b6eaaac 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -864,15 +864,21 @@ static int rs600_startup(struct radeon_device *rdev) | |||
864 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | 864 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
865 | return r; | 865 | return r; |
866 | } | 866 | } |
867 | r = r100_ib_init(rdev); | 867 | |
868 | r = r600_audio_init(rdev); | ||
868 | if (r) { | 869 | if (r) { |
869 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | 870 | dev_err(rdev->dev, "failed initializing audio\n"); |
870 | return r; | 871 | return r; |
871 | } | 872 | } |
872 | 873 | ||
873 | r = r600_audio_init(rdev); | 874 | r = radeon_ib_pool_start(rdev); |
875 | if (r) | ||
876 | return r; | ||
877 | |||
878 | r = r100_ib_test(rdev); | ||
874 | if (r) { | 879 | if (r) { |
875 | dev_err(rdev->dev, "failed initializing audio\n"); | 880 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
881 | rdev->accel_working = false; | ||
876 | return r; | 882 | return r; |
877 | } | 883 | } |
878 | 884 | ||
@@ -897,11 +903,14 @@ int rs600_resume(struct radeon_device *rdev) | |||
897 | rv515_clock_startup(rdev); | 903 | rv515_clock_startup(rdev); |
898 | /* Initialize surface registers */ | 904 | /* Initialize surface registers */ |
899 | radeon_surface_init(rdev); | 905 | radeon_surface_init(rdev); |
906 | |||
907 | rdev->accel_working = true; | ||
900 | return rs600_startup(rdev); | 908 | return rs600_startup(rdev); |
901 | } | 909 | } |
902 | 910 | ||
903 | int rs600_suspend(struct radeon_device *rdev) | 911 | int rs600_suspend(struct radeon_device *rdev) |
904 | { | 912 | { |
913 | radeon_ib_pool_suspend(rdev); | ||
905 | r600_audio_fini(rdev); | 914 | r600_audio_fini(rdev); |
906 | r100_cp_disable(rdev); | 915 | r100_cp_disable(rdev); |
907 | radeon_wb_disable(rdev); | 916 | radeon_wb_disable(rdev); |
@@ -982,7 +991,14 @@ int rs600_init(struct radeon_device *rdev) | |||
982 | if (r) | 991 | if (r) |
983 | return r; | 992 | return r; |
984 | rs600_set_safe_registers(rdev); | 993 | rs600_set_safe_registers(rdev); |
994 | |||
995 | r = radeon_ib_pool_init(rdev); | ||
985 | rdev->accel_working = true; | 996 | rdev->accel_working = true; |
997 | if (r) { | ||
998 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
999 | rdev->accel_working = false; | ||
1000 | } | ||
1001 | |||
986 | r = rs600_startup(rdev); | 1002 | r = rs600_startup(rdev); |
987 | if (r) { | 1003 | if (r) { |
988 | /* Something went wrong with the accel init, stop accel */ | 1004 | /* Something went wrong with the accel init, stop accel */ |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index ae941d196d3f..4f24a0fa8c82 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -636,15 +636,21 @@ static int rs690_startup(struct radeon_device *rdev) | |||
636 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | 636 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
637 | return r; | 637 | return r; |
638 | } | 638 | } |
639 | r = r100_ib_init(rdev); | 639 | |
640 | r = r600_audio_init(rdev); | ||
640 | if (r) { | 641 | if (r) { |
641 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | 642 | dev_err(rdev->dev, "failed initializing audio\n"); |
642 | return r; | 643 | return r; |
643 | } | 644 | } |
644 | 645 | ||
645 | r = r600_audio_init(rdev); | 646 | r = radeon_ib_pool_start(rdev); |
647 | if (r) | ||
648 | return r; | ||
649 | |||
650 | r = r100_ib_test(rdev); | ||
646 | if (r) { | 651 | if (r) { |
647 | dev_err(rdev->dev, "failed initializing audio\n"); | 652 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
653 | rdev->accel_working = false; | ||
648 | return r; | 654 | return r; |
649 | } | 655 | } |
650 | 656 | ||
@@ -669,11 +675,14 @@ int rs690_resume(struct radeon_device *rdev) | |||
669 | rv515_clock_startup(rdev); | 675 | rv515_clock_startup(rdev); |
670 | /* Initialize surface registers */ | 676 | /* Initialize surface registers */ |
671 | radeon_surface_init(rdev); | 677 | radeon_surface_init(rdev); |
678 | |||
679 | rdev->accel_working = true; | ||
672 | return rs690_startup(rdev); | 680 | return rs690_startup(rdev); |
673 | } | 681 | } |
674 | 682 | ||
675 | int rs690_suspend(struct radeon_device *rdev) | 683 | int rs690_suspend(struct radeon_device *rdev) |
676 | { | 684 | { |
685 | radeon_ib_pool_suspend(rdev); | ||
677 | r600_audio_fini(rdev); | 686 | r600_audio_fini(rdev); |
678 | r100_cp_disable(rdev); | 687 | r100_cp_disable(rdev); |
679 | radeon_wb_disable(rdev); | 688 | radeon_wb_disable(rdev); |
@@ -755,7 +764,14 @@ int rs690_init(struct radeon_device *rdev) | |||
755 | if (r) | 764 | if (r) |
756 | return r; | 765 | return r; |
757 | rs600_set_safe_registers(rdev); | 766 | rs600_set_safe_registers(rdev); |
767 | |||
768 | r = radeon_ib_pool_init(rdev); | ||
758 | rdev->accel_working = true; | 769 | rdev->accel_working = true; |
770 | if (r) { | ||
771 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
772 | rdev->accel_working = false; | ||
773 | } | ||
774 | |||
759 | r = rs690_startup(rdev); | 775 | r = rs690_startup(rdev); |
760 | if (r) { | 776 | if (r) { |
761 | /* Something went wrong with the accel init, stop accel */ | 777 | /* Something went wrong with the accel init, stop accel */ |
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 21d90d9fe11c..880637fd1946 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c | |||
@@ -408,9 +408,15 @@ static int rv515_startup(struct radeon_device *rdev) | |||
408 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); | 408 | dev_err(rdev->dev, "failed initializing CP (%d).\n", r); |
409 | return r; | 409 | return r; |
410 | } | 410 | } |
411 | r = r100_ib_init(rdev); | 411 | |
412 | r = radeon_ib_pool_start(rdev); | ||
413 | if (r) | ||
414 | return r; | ||
415 | |||
416 | r = r100_ib_test(rdev); | ||
412 | if (r) { | 417 | if (r) { |
413 | dev_err(rdev->dev, "failed initializing IB (%d).\n", r); | 418 | dev_err(rdev->dev, "failed testing IB (%d).\n", r); |
419 | rdev->accel_working = false; | ||
414 | return r; | 420 | return r; |
415 | } | 421 | } |
416 | return 0; | 422 | return 0; |
@@ -435,6 +441,8 @@ int rv515_resume(struct radeon_device *rdev) | |||
435 | rv515_clock_startup(rdev); | 441 | rv515_clock_startup(rdev); |
436 | /* Initialize surface registers */ | 442 | /* Initialize surface registers */ |
437 | radeon_surface_init(rdev); | 443 | radeon_surface_init(rdev); |
444 | |||
445 | rdev->accel_working = true; | ||
438 | return rv515_startup(rdev); | 446 | return rv515_startup(rdev); |
439 | } | 447 | } |
440 | 448 | ||
@@ -531,7 +539,14 @@ int rv515_init(struct radeon_device *rdev) | |||
531 | if (r) | 539 | if (r) |
532 | return r; | 540 | return r; |
533 | rv515_set_safe_registers(rdev); | 541 | rv515_set_safe_registers(rdev); |
542 | |||
543 | r = radeon_ib_pool_init(rdev); | ||
534 | rdev->accel_working = true; | 544 | rdev->accel_working = true; |
545 | if (r) { | ||
546 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
547 | rdev->accel_working = false; | ||
548 | } | ||
549 | |||
535 | r = rv515_startup(rdev); | 550 | r = rv515_startup(rdev); |
536 | if (r) { | 551 | if (r) { |
537 | /* Something went wrong with the accel init, stop accel */ | 552 | /* Something went wrong with the accel init, stop accel */ |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index d854fbfa5a52..a1668b659ddd 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -1110,6 +1110,17 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1110 | if (r) | 1110 | if (r) |
1111 | return r; | 1111 | return r; |
1112 | 1112 | ||
1113 | r = radeon_ib_pool_start(rdev); | ||
1114 | if (r) | ||
1115 | return r; | ||
1116 | |||
1117 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
1118 | if (r) { | ||
1119 | dev_err(rdev->dev, "IB test failed (%d).\n", r); | ||
1120 | rdev->accel_working = false; | ||
1121 | return r; | ||
1122 | } | ||
1123 | |||
1113 | return 0; | 1124 | return 0; |
1114 | } | 1125 | } |
1115 | 1126 | ||
@@ -1124,18 +1135,13 @@ int rv770_resume(struct radeon_device *rdev) | |||
1124 | /* post card */ | 1135 | /* post card */ |
1125 | atom_asic_init(rdev->mode_info.atom_context); | 1136 | atom_asic_init(rdev->mode_info.atom_context); |
1126 | 1137 | ||
1138 | rdev->accel_working = true; | ||
1127 | r = rv770_startup(rdev); | 1139 | r = rv770_startup(rdev); |
1128 | if (r) { | 1140 | if (r) { |
1129 | DRM_ERROR("r600 startup failed on resume\n"); | 1141 | DRM_ERROR("r600 startup failed on resume\n"); |
1130 | return r; | 1142 | return r; |
1131 | } | 1143 | } |
1132 | 1144 | ||
1133 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
1134 | if (r) { | ||
1135 | DRM_ERROR("radeon: failed testing IB (%d).\n", r); | ||
1136 | return r; | ||
1137 | } | ||
1138 | |||
1139 | r = r600_audio_init(rdev); | 1145 | r = r600_audio_init(rdev); |
1140 | if (r) { | 1146 | if (r) { |
1141 | dev_err(rdev->dev, "radeon: audio init failed\n"); | 1147 | dev_err(rdev->dev, "radeon: audio init failed\n"); |
@@ -1149,13 +1155,14 @@ int rv770_resume(struct radeon_device *rdev) | |||
1149 | int rv770_suspend(struct radeon_device *rdev) | 1155 | int rv770_suspend(struct radeon_device *rdev) |
1150 | { | 1156 | { |
1151 | r600_audio_fini(rdev); | 1157 | r600_audio_fini(rdev); |
1158 | radeon_ib_pool_suspend(rdev); | ||
1159 | r600_blit_suspend(rdev); | ||
1152 | /* FIXME: we should wait for ring to be empty */ | 1160 | /* FIXME: we should wait for ring to be empty */ |
1153 | r700_cp_stop(rdev); | 1161 | r700_cp_stop(rdev); |
1154 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; | 1162 | rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; |
1155 | r600_irq_suspend(rdev); | 1163 | r600_irq_suspend(rdev); |
1156 | radeon_wb_disable(rdev); | 1164 | radeon_wb_disable(rdev); |
1157 | rv770_pcie_gart_disable(rdev); | 1165 | rv770_pcie_gart_disable(rdev); |
1158 | r600_blit_suspend(rdev); | ||
1159 | 1166 | ||
1160 | return 0; | 1167 | return 0; |
1161 | } | 1168 | } |
@@ -1234,30 +1241,24 @@ int rv770_init(struct radeon_device *rdev) | |||
1234 | if (r) | 1241 | if (r) |
1235 | return r; | 1242 | return r; |
1236 | 1243 | ||
1244 | r = radeon_ib_pool_init(rdev); | ||
1237 | rdev->accel_working = true; | 1245 | rdev->accel_working = true; |
1246 | if (r) { | ||
1247 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
1248 | rdev->accel_working = false; | ||
1249 | } | ||
1250 | |||
1238 | r = rv770_startup(rdev); | 1251 | r = rv770_startup(rdev); |
1239 | if (r) { | 1252 | if (r) { |
1240 | dev_err(rdev->dev, "disabling GPU acceleration\n"); | 1253 | dev_err(rdev->dev, "disabling GPU acceleration\n"); |
1241 | r700_cp_fini(rdev); | 1254 | r700_cp_fini(rdev); |
1242 | r600_irq_fini(rdev); | 1255 | r600_irq_fini(rdev); |
1243 | radeon_wb_fini(rdev); | 1256 | radeon_wb_fini(rdev); |
1257 | r100_ib_fini(rdev); | ||
1244 | radeon_irq_kms_fini(rdev); | 1258 | radeon_irq_kms_fini(rdev); |
1245 | rv770_pcie_gart_fini(rdev); | 1259 | rv770_pcie_gart_fini(rdev); |
1246 | rdev->accel_working = false; | 1260 | rdev->accel_working = false; |
1247 | } | 1261 | } |
1248 | if (rdev->accel_working) { | ||
1249 | r = radeon_ib_pool_init(rdev); | ||
1250 | if (r) { | ||
1251 | dev_err(rdev->dev, "IB initialization failed (%d).\n", r); | ||
1252 | rdev->accel_working = false; | ||
1253 | } else { | ||
1254 | r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX); | ||
1255 | if (r) { | ||
1256 | dev_err(rdev->dev, "IB test failed (%d).\n", r); | ||
1257 | rdev->accel_working = false; | ||
1258 | } | ||
1259 | } | ||
1260 | } | ||
1261 | 1262 | ||
1262 | r = r600_audio_init(rdev); | 1263 | r = r600_audio_init(rdev); |
1263 | if (r) { | 1264 | if (r) { |
@@ -1274,7 +1275,7 @@ void rv770_fini(struct radeon_device *rdev) | |||
1274 | r700_cp_fini(rdev); | 1275 | r700_cp_fini(rdev); |
1275 | r600_irq_fini(rdev); | 1276 | r600_irq_fini(rdev); |
1276 | radeon_wb_fini(rdev); | 1277 | radeon_wb_fini(rdev); |
1277 | radeon_ib_pool_fini(rdev); | 1278 | r100_ib_fini(rdev); |
1278 | radeon_irq_kms_fini(rdev); | 1279 | radeon_irq_kms_fini(rdev); |
1279 | rv770_pcie_gart_fini(rdev); | 1280 | rv770_pcie_gart_fini(rdev); |
1280 | r600_vram_scratch_fini(rdev); | 1281 | r600_vram_scratch_fini(rdev); |