author     Jerome Glisse <jglisse@redhat.com>    2009-11-20 08:29:23 -0500
committer  Dave Airlie <airlied@redhat.com>      2009-12-01 23:00:18 -0500
commit     4c7886791264f03428d5424befb1b96f08fc90f4
tree       2c644931001b06969fb3038e7beb68db436c4872 /drivers/gpu/drm/radeon/r600.c
parent     1614f8b17b8cc3ad143541d41569623d30dbc9ec
drm/radeon/kms: Rework radeon object handling
The locking & protection of radeon objects was somewhat messy.
This patch completely reworks it to use ttm reserve as the
protection for the radeon object structure members. It also
shrinks the various radeon object structures by removing fields
that were redundant with the ttm information. Last, it converts
a few simple functions to inlines, which should help with
performance.
airlied: rebase on top of r600 and other changes.
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
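For reference, the reserve-based protection shows up as one recurring teardown pattern throughout the hunks below. A minimal sketch of that pattern follows; the helper name example_bo_fini and the struct radeon_bo handle type are illustrative assumptions, while the radeon_bo_* calls are exactly the ones used in this diff:

/* Hypothetical helper mirroring the new r600_ih_ring_fini(): every
 * touch of a bo's map/pin state now happens under its ttm reservation. */
static void example_bo_fini(struct radeon_bo **bo)
{
	int r;

	if (*bo == NULL)
		return;
	r = radeon_bo_reserve(*bo, false);	/* take the ttm reservation */
	if (likely(r == 0)) {
		radeon_bo_kunmap(*bo);		/* drop the kernel mapping */
		radeon_bo_unpin(*bo);		/* allow ttm to move the bo again */
		radeon_bo_unreserve(*bo);	/* release the reservation */
	}
	radeon_bo_unref(bo);			/* drop the reference */
}

The setup paths (r600_wb_enable(), r600_startup(), r600_ih_ring_alloc()) follow the same scheme, holding the reservation around radeon_bo_pin()/radeon_bo_kmap() and dropping it before returning.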
Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r--   drivers/gpu/drm/radeon/r600.c   100
1 file changed, 68 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 5966027aa967..26947e8dadcb 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -184,7 +184,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
 void r600_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i;
+	int i, r;
 
 	/* Disable all tables */
 	for (i = 0; i < 7; i++)
@@ -212,8 +212,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -1436,10 +1440,16 @@ int r600_ring_test(struct radeon_device *rdev)
 
 void r600_wb_disable(struct radeon_device *rdev)
 {
+	int r;
+
 	WREG32(SCRATCH_UMSK, 0);
 	if (rdev->wb.wb_obj) {
-		radeon_object_kunmap(rdev->wb.wb_obj);
-		radeon_object_unpin(rdev->wb.wb_obj);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0))
+			return;
+		radeon_bo_kunmap(rdev->wb.wb_obj);
+		radeon_bo_unpin(rdev->wb.wb_obj);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
 	}
 }
 
@@ -1447,7 +1457,7 @@ void r600_wb_fini(struct radeon_device *rdev)
 {
 	r600_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
-		radeon_object_unref(&rdev->wb.wb_obj);
+		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
 	}
@@ -1458,22 +1468,29 @@ int r600_wb_enable(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
-				RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
+		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
 		if (r) {
-			dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
+			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
+			return r;
+		}
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			r600_wb_fini(rdev);
 			return r;
 		}
-		r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
 				&rdev->wb.gpu_addr);
 		if (r) {
-			dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
 			r600_wb_fini(rdev);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
 		if (r) {
-			dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
+			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
 			r600_wb_fini(rdev);
 			return r;
 		}
@@ -1563,10 +1580,14 @@ int r600_startup(struct radeon_device *rdev)
 	}
 	r600_gpu_init(rdev);
 
-	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-			&rdev->r600_blit.shader_gpu_addr);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+			&rdev->r600_blit.shader_gpu_addr);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	if (r) {
-		DRM_ERROR("failed to pin blit object %d\n", r);
+		dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
 		return r;
 	}
 
@@ -1639,13 +1660,19 @@ int r600_resume(struct radeon_device *rdev)
 
 int r600_suspend(struct radeon_device *rdev)
 {
+	int r;
+
 	/* FIXME: we should wait for ring to be empty */
 	r600_cp_stop(rdev);
 	rdev->cp.ready = false;
 	r600_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
-	radeon_object_unpin(rdev->r600_blit.shader_obj);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	radeon_bo_unpin(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	return 0;
 }
 
@@ -1710,7 +1737,7 @@ int r600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 
@@ -1782,7 +1809,7 @@ void r600_fini(struct radeon_device *rdev)
 	radeon_clocks_fini(rdev);
 	if (rdev->flags & RADEON_IS_AGP)
 		radeon_agp_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -1897,24 +1924,28 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
 	rdev->ih.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->ih.ring_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, rdev->ih.ring_size,
+		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
 				     true,
 				     RADEON_GEM_DOMAIN_GTT,
-				     false,
-				     &rdev->ih.ring_obj);
+				     &rdev->ih.ring_obj);
 		if (r) {
 			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->ih.ring_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-				      &rdev->ih.gpu_addr);
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->ih.ring_obj,
+				  RADEON_GEM_DOMAIN_GTT,
+				  &rdev->ih.gpu_addr);
 		if (r) {
+			radeon_bo_unreserve(rdev->ih.ring_obj);
 			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->ih.ring_obj,
+		r = radeon_bo_kmap(rdev->ih.ring_obj,
 				       (void **)&rdev->ih.ring);
+		radeon_bo_unreserve(rdev->ih.ring_obj);
 		if (r) {
 			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
 			return r;
@@ -1928,10 +1959,15 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
 
 static void r600_ih_ring_fini(struct radeon_device *rdev)
 {
+	int r;
 	if (rdev->ih.ring_obj) {
-		radeon_object_kunmap(rdev->ih.ring_obj);
-		radeon_object_unpin(rdev->ih.ring_obj);
-		radeon_object_unref(&rdev->ih.ring_obj);
+		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ih.ring_obj);
+			radeon_bo_unpin(rdev->ih.ring_obj);
+			radeon_bo_unreserve(rdev->ih.ring_obj);
+		}
+		radeon_bo_unref(&rdev->ih.ring_obj);
 		rdev->ih.ring = NULL;
 		rdev->ih.ring_obj = NULL;
 	}