author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-06-01 18:40:29 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-06-01 18:40:29 -0400
commit | 3ded7acfddb3d8dad4a1490a3a75e9d8bc975c35 (patch) |
tree | c2970512b06e8a32f5dc671b7fa46186d8ec2a56 /drivers/gpu |
parent | 37b22400f86e83076c8e540b65e8b7e3f1f63e6b (diff) |
parent | 47819ba234d41465b76f179ba674ff549255a5d2 (diff) |
Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
"A bunch of fixes:
- vmware memory corruption
- ttm spinlock balance
- cirrus/mgag200 work in the presence of efifb
and finally Alex and Jerome managed to track down a magic set of bits
that on certain rv740 and evergreen cards allow the correct use of the
complete set of render backends, this makes the cards operate
correctly in a number of scenarios we had issues in before, it also
manages to boost speed on benchmarks my large amounts on these
specific gpus."
* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
drm/edid: Make the header fixup threshold tunable
drm/radeon: fix regression in UMS CS ioctl
drm/vmwgfx: Fix nasty write past alloced memory area
drm/ttm: Fix spinlock imbalance
drm/radeon: fixup tiling group size and backendmap on r6xx-r9xx (v4)
drm/radeon: fix HD6790, HD6570 backend programming
drm/radeon: properly program gart on rv740, juniper, cypress, barts, hemlock
drm/radeon: fix bank information in tiling config
drm/mgag200: kick off conflicting framebuffers earlier.
drm/cirrus: kick out conflicting framebuffers earlier
cirrus: avoid crash if driver fails to load
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/cirrus/cirrus_drv.c | 19
-rw-r--r-- | drivers/gpu/drm/cirrus/cirrus_drv.h | 2
-rw-r--r-- | drivers/gpu/drm/cirrus/cirrus_ttm.c | 5
-rw-r--r-- | drivers/gpu/drm/drm_edid.c | 11
-rw-r--r-- | drivers/gpu/drm/mgag200/mgag200_drv.c | 19
-rw-r--r-- | drivers/gpu/drm/radeon/evergreen.c | 382
-rw-r--r-- | drivers/gpu/drm/radeon/evergreend.h | 11
-rw-r--r-- | drivers/gpu/drm/radeon/ni.c | 360
-rw-r--r-- | drivers/gpu/drm/radeon/nid.h | 11
-rw-r--r-- | drivers/gpu/drm/radeon/r600.c | 199
-rw-r--r-- | drivers/gpu/drm/radeon/r600d.h | 2
-rw-r--r-- | drivers/gpu/drm/radeon/radeon.h | 5
-rw-r--r-- | drivers/gpu/drm/radeon/radeon_cs.c | 31
-rw-r--r-- | drivers/gpu/drm/radeon/rv770.c | 274
-rw-r--r-- | drivers/gpu/drm/radeon/rv770d.h | 4
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo.c | 1
-rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 2
17 files changed, 321 insertions, 1017 deletions
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index d7038230b71e..7053140c6596 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 	{0,}
 };
 
+
+static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
+	kfree(ap);
+}
+
 static int __devinit
 cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	cirrus_kick_out_firmware_fb(pdev);
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
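Both the cirrus hunk above and the mgag200 hunk below add the same helper shape: build a one-entry aperture list covering the device's BAR 0 and ask fbdev to evict any firmware framebuffer (efifb/vesafb) already claiming that range, using the shadowed-ROM flag to mark the primary display on x86. A minimal sketch of that pattern for a hypothetical driver; the driver name, the "mydrvdrmfb" identifier and the NULL check are illustrative additions, not part of this patch.

```c
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/slab.h>

/* Evict a firmware framebuffer that sits in this device's VRAM BAR
 * before the KMS driver takes over scanout. */
static void mydrv_kick_out_firmware_fb(struct pci_dev *pdev)
{
	struct apertures_struct *ap;
	bool primary = false;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	/* The firmware framebuffer, if any, lives inside BAR 0. */
	ap->ranges[0].base = pci_resource_start(pdev, 0);
	ap->ranges[0].size = pci_resource_len(pdev, 0);

#ifdef CONFIG_X86
	/* A shadowed ROM marks the boot (primary) VGA device on x86. */
	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
	remove_conflicting_framebuffers(ap, "mydrvdrmfb", primary);
	kfree(ap);
}
```

Both drivers in this series call their version of this helper from the PCI probe routine, before drm_get_pci_dev().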
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 21bdfa8836f7..64ea597cb6d3 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -145,7 +145,7 @@ struct cirrus_device {
 		struct ttm_bo_device bdev;
 		atomic_t validate_sequence;
 	} ttm;
-
+	bool mm_inited;
 };
 
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ebcd11a5023..50e170f879de 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
 				  pci_resource_len(dev->pdev, 0),
 				  DRM_MTRR_WC);
 
+	cirrus->mm_inited = true;
 	return 0;
 }
 
 void cirrus_mm_fini(struct cirrus_device *cirrus)
 {
 	struct drm_device *dev = cirrus->dev;
+
+	if (!cirrus->mm_inited)
+		return;
+
 	ttm_bo_device_release(&cirrus->ttm.bdev);
 
 	cirrus_ttm_global_release(cirrus);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c3b5139eba7f..eb92fe257a39 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include "drmP.h"
 #include "drm_edid.h"
 #include "drm_edid_modes.h"
@@ -149,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
 }
 EXPORT_SYMBOL(drm_edid_header_is_valid);
 
+static int edid_fixup __read_mostly = 6;
+module_param_named(edid_fixup, edid_fixup, int, 0400);
+MODULE_PARM_DESC(edid_fixup,
+		 "Minimum number of valid EDID header bytes (0-8, default 6)");
 
 /*
  * Sanity check the EDID block (base or extension).  Return 0 if the block
@@ -160,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
 	u8 csum = 0;
 	struct edid *edid = (struct edid *)raw_edid;
 
+	if (edid_fixup > 8 || edid_fixup < 0)
+		edid_fixup = 6;
+
 	if (block == 0) {
 		int score = drm_edid_header_is_valid(raw_edid);
 		if (score == 8) ;
-		else if (score >= 6) {
+		else if (score >= edid_fixup) {
 			DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
 			memcpy(raw_edid, edid_header, sizeof(edid_header));
 		} else {
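The new edid_fixup parameter plugs straight into the header check above: drm_edid_header_is_valid() returns how many of the eight fixed EDID header bytes (00 ff ff ff ff ff ff 00) match, and block 0 is only patched up when that score reaches the threshold (clamped to 0-8, default 6). A standalone sketch of the scoring and threshold logic, with a local edid_fixup variable standing in for the module parameter:

```c
#include <stdio.h>
#include <string.h>

/* The fixed 8-byte header every valid EDID base block must start with. */
static const unsigned char edid_header[] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};

/* Mirrors drm_edid_header_is_valid(): count matching header bytes (0-8). */
static int edid_header_score(const unsigned char *raw_edid)
{
	int i, score = 0;

	for (i = 0; i < 8; i++)
		if (raw_edid[i] == edid_header[i])
			score++;
	return score;
}

int main(void)
{
	/* Simulated block 0 with one corrupted header byte (score 7). */
	unsigned char block[128] = {
		0x00, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x00
	};
	int edid_fixup = 6;	/* stand-in for the new module parameter */
	int score = edid_header_score(block);

	if (score == 8) {
		printf("header is perfect\n");
	} else if (score >= edid_fixup) {
		/* The kernel logs "Fixing EDID header..." and patches it. */
		memcpy(block, edid_header, sizeof(edid_header));
		printf("header repaired (score %d)\n", score);
	} else {
		printf("header too damaged (score %d), block rejected\n", score);
	}
	return 0;
}
```

Setting the parameter to 8 disables the fixup entirely (only perfect headers pass), while lower values accept more badly damaged headers.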
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 3c8e04f54713..93e832d6c328 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
+static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+	struct apertures_struct *ap;
+	bool primary = false;
+
+	ap = alloc_apertures(1);
+	ap->ranges[0].base = pci_resource_start(pdev, 0);
+	ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+	remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
+	kfree(ap);
+}
+
+
 static int __devinit
 mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+	mgag200_kick_out_firmware_fb(pdev);
+
 	return drm_get_pci_dev(pdev, ent, &driver);
 }
 
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 58991af90502..01550d05e273 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev) | |||
1029 | WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); | 1029 | WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); |
1030 | WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); | 1030 | WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); |
1031 | WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); | 1031 | WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); |
1032 | if ((rdev->family == CHIP_JUNIPER) || | ||
1033 | (rdev->family == CHIP_CYPRESS) || | ||
1034 | (rdev->family == CHIP_HEMLOCK) || | ||
1035 | (rdev->family == CHIP_BARTS)) | ||
1036 | WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp); | ||
1032 | } | 1037 | } |
1033 | WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); | 1038 | WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); |
1034 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); | 1039 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); |
@@ -1553,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev) | |||
1553 | /* | 1558 | /* |
1554 | * Core functions | 1559 | * Core functions |
1555 | */ | 1560 | */ |
1556 | static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | ||
1557 | u32 num_tile_pipes, | ||
1558 | u32 num_backends, | ||
1559 | u32 backend_disable_mask) | ||
1560 | { | ||
1561 | u32 backend_map = 0; | ||
1562 | u32 enabled_backends_mask = 0; | ||
1563 | u32 enabled_backends_count = 0; | ||
1564 | u32 cur_pipe; | ||
1565 | u32 swizzle_pipe[EVERGREEN_MAX_PIPES]; | ||
1566 | u32 cur_backend = 0; | ||
1567 | u32 i; | ||
1568 | bool force_no_swizzle; | ||
1569 | |||
1570 | if (num_tile_pipes > EVERGREEN_MAX_PIPES) | ||
1571 | num_tile_pipes = EVERGREEN_MAX_PIPES; | ||
1572 | if (num_tile_pipes < 1) | ||
1573 | num_tile_pipes = 1; | ||
1574 | if (num_backends > EVERGREEN_MAX_BACKENDS) | ||
1575 | num_backends = EVERGREEN_MAX_BACKENDS; | ||
1576 | if (num_backends < 1) | ||
1577 | num_backends = 1; | ||
1578 | |||
1579 | for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { | ||
1580 | if (((backend_disable_mask >> i) & 1) == 0) { | ||
1581 | enabled_backends_mask |= (1 << i); | ||
1582 | ++enabled_backends_count; | ||
1583 | } | ||
1584 | if (enabled_backends_count == num_backends) | ||
1585 | break; | ||
1586 | } | ||
1587 | |||
1588 | if (enabled_backends_count == 0) { | ||
1589 | enabled_backends_mask = 1; | ||
1590 | enabled_backends_count = 1; | ||
1591 | } | ||
1592 | |||
1593 | if (enabled_backends_count != num_backends) | ||
1594 | num_backends = enabled_backends_count; | ||
1595 | |||
1596 | memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES); | ||
1597 | switch (rdev->family) { | ||
1598 | case CHIP_CEDAR: | ||
1599 | case CHIP_REDWOOD: | ||
1600 | case CHIP_PALM: | ||
1601 | case CHIP_SUMO: | ||
1602 | case CHIP_SUMO2: | ||
1603 | case CHIP_TURKS: | ||
1604 | case CHIP_CAICOS: | ||
1605 | force_no_swizzle = false; | ||
1606 | break; | ||
1607 | case CHIP_CYPRESS: | ||
1608 | case CHIP_HEMLOCK: | ||
1609 | case CHIP_JUNIPER: | ||
1610 | case CHIP_BARTS: | ||
1611 | default: | ||
1612 | force_no_swizzle = true; | ||
1613 | break; | ||
1614 | } | ||
1615 | if (force_no_swizzle) { | ||
1616 | bool last_backend_enabled = false; | ||
1617 | |||
1618 | force_no_swizzle = false; | ||
1619 | for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) { | ||
1620 | if (((enabled_backends_mask >> i) & 1) == 1) { | ||
1621 | if (last_backend_enabled) | ||
1622 | force_no_swizzle = true; | ||
1623 | last_backend_enabled = true; | ||
1624 | } else | ||
1625 | last_backend_enabled = false; | ||
1626 | } | ||
1627 | } | ||
1628 | |||
1629 | switch (num_tile_pipes) { | ||
1630 | case 1: | ||
1631 | case 3: | ||
1632 | case 5: | ||
1633 | case 7: | ||
1634 | DRM_ERROR("odd number of pipes!\n"); | ||
1635 | break; | ||
1636 | case 2: | ||
1637 | swizzle_pipe[0] = 0; | ||
1638 | swizzle_pipe[1] = 1; | ||
1639 | break; | ||
1640 | case 4: | ||
1641 | if (force_no_swizzle) { | ||
1642 | swizzle_pipe[0] = 0; | ||
1643 | swizzle_pipe[1] = 1; | ||
1644 | swizzle_pipe[2] = 2; | ||
1645 | swizzle_pipe[3] = 3; | ||
1646 | } else { | ||
1647 | swizzle_pipe[0] = 0; | ||
1648 | swizzle_pipe[1] = 2; | ||
1649 | swizzle_pipe[2] = 1; | ||
1650 | swizzle_pipe[3] = 3; | ||
1651 | } | ||
1652 | break; | ||
1653 | case 6: | ||
1654 | if (force_no_swizzle) { | ||
1655 | swizzle_pipe[0] = 0; | ||
1656 | swizzle_pipe[1] = 1; | ||
1657 | swizzle_pipe[2] = 2; | ||
1658 | swizzle_pipe[3] = 3; | ||
1659 | swizzle_pipe[4] = 4; | ||
1660 | swizzle_pipe[5] = 5; | ||
1661 | } else { | ||
1662 | swizzle_pipe[0] = 0; | ||
1663 | swizzle_pipe[1] = 2; | ||
1664 | swizzle_pipe[2] = 4; | ||
1665 | swizzle_pipe[3] = 1; | ||
1666 | swizzle_pipe[4] = 3; | ||
1667 | swizzle_pipe[5] = 5; | ||
1668 | } | ||
1669 | break; | ||
1670 | case 8: | ||
1671 | if (force_no_swizzle) { | ||
1672 | swizzle_pipe[0] = 0; | ||
1673 | swizzle_pipe[1] = 1; | ||
1674 | swizzle_pipe[2] = 2; | ||
1675 | swizzle_pipe[3] = 3; | ||
1676 | swizzle_pipe[4] = 4; | ||
1677 | swizzle_pipe[5] = 5; | ||
1678 | swizzle_pipe[6] = 6; | ||
1679 | swizzle_pipe[7] = 7; | ||
1680 | } else { | ||
1681 | swizzle_pipe[0] = 0; | ||
1682 | swizzle_pipe[1] = 2; | ||
1683 | swizzle_pipe[2] = 4; | ||
1684 | swizzle_pipe[3] = 6; | ||
1685 | swizzle_pipe[4] = 1; | ||
1686 | swizzle_pipe[5] = 3; | ||
1687 | swizzle_pipe[6] = 5; | ||
1688 | swizzle_pipe[7] = 7; | ||
1689 | } | ||
1690 | break; | ||
1691 | } | ||
1692 | |||
1693 | for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { | ||
1694 | while (((1 << cur_backend) & enabled_backends_mask) == 0) | ||
1695 | cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; | ||
1696 | |||
1697 | backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); | ||
1698 | |||
1699 | cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS; | ||
1700 | } | ||
1701 | |||
1702 | return backend_map; | ||
1703 | } | ||
1704 | |||
1705 | static void evergreen_gpu_init(struct radeon_device *rdev) | 1561 | static void evergreen_gpu_init(struct radeon_device *rdev) |
1706 | { | 1562 | { |
1707 | u32 cc_rb_backend_disable = 0; | 1563 | u32 gb_addr_config; |
1708 | u32 cc_gc_shader_pipe_config; | ||
1709 | u32 gb_addr_config = 0; | ||
1710 | u32 mc_shared_chmap, mc_arb_ramcfg; | 1564 | u32 mc_shared_chmap, mc_arb_ramcfg; |
1711 | u32 gb_backend_map; | ||
1712 | u32 grbm_gfx_index; | ||
1713 | u32 sx_debug_1; | 1565 | u32 sx_debug_1; |
1714 | u32 smx_dc_ctl0; | 1566 | u32 smx_dc_ctl0; |
1715 | u32 sq_config; | 1567 | u32 sq_config; |
@@ -1724,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1724 | u32 sq_stack_resource_mgmt_3; | 1576 | u32 sq_stack_resource_mgmt_3; |
1725 | u32 vgt_cache_invalidation; | 1577 | u32 vgt_cache_invalidation; |
1726 | u32 hdp_host_path_cntl, tmp; | 1578 | u32 hdp_host_path_cntl, tmp; |
1579 | u32 disabled_rb_mask; | ||
1727 | int i, j, num_shader_engines, ps_thread_count; | 1580 | int i, j, num_shader_engines, ps_thread_count; |
1728 | 1581 | ||
1729 | switch (rdev->family) { | 1582 | switch (rdev->family) { |
@@ -1748,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1748 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; | 1601 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; |
1749 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1602 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1750 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1603 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1604 | gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN; | ||
1751 | break; | 1605 | break; |
1752 | case CHIP_JUNIPER: | 1606 | case CHIP_JUNIPER: |
1753 | rdev->config.evergreen.num_ses = 1; | 1607 | rdev->config.evergreen.num_ses = 1; |
@@ -1769,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1769 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; | 1623 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; |
1770 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1624 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1771 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1625 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1626 | gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN; | ||
1772 | break; | 1627 | break; |
1773 | case CHIP_REDWOOD: | 1628 | case CHIP_REDWOOD: |
1774 | rdev->config.evergreen.num_ses = 1; | 1629 | rdev->config.evergreen.num_ses = 1; |
@@ -1790,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1790 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; | 1645 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; |
1791 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1646 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1792 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1647 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1648 | gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; | ||
1793 | break; | 1649 | break; |
1794 | case CHIP_CEDAR: | 1650 | case CHIP_CEDAR: |
1795 | default: | 1651 | default: |
@@ -1812,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1812 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | 1668 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
1813 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1669 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1814 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1670 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1671 | gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN; | ||
1815 | break; | 1672 | break; |
1816 | case CHIP_PALM: | 1673 | case CHIP_PALM: |
1817 | rdev->config.evergreen.num_ses = 1; | 1674 | rdev->config.evergreen.num_ses = 1; |
@@ -1833,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1833 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | 1690 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
1834 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1691 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1835 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1692 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1693 | gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN; | ||
1836 | break; | 1694 | break; |
1837 | case CHIP_SUMO: | 1695 | case CHIP_SUMO: |
1838 | rdev->config.evergreen.num_ses = 1; | 1696 | rdev->config.evergreen.num_ses = 1; |
@@ -1860,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1860 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | 1718 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
1861 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1719 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1862 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1720 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1721 | gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; | ||
1863 | break; | 1722 | break; |
1864 | case CHIP_SUMO2: | 1723 | case CHIP_SUMO2: |
1865 | rdev->config.evergreen.num_ses = 1; | 1724 | rdev->config.evergreen.num_ses = 1; |
@@ -1881,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1881 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | 1740 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
1882 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1741 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1883 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1742 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1743 | gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; | ||
1884 | break; | 1744 | break; |
1885 | case CHIP_BARTS: | 1745 | case CHIP_BARTS: |
1886 | rdev->config.evergreen.num_ses = 2; | 1746 | rdev->config.evergreen.num_ses = 2; |
@@ -1902,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1902 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; | 1762 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; |
1903 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1763 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1904 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1764 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1765 | gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN; | ||
1905 | break; | 1766 | break; |
1906 | case CHIP_TURKS: | 1767 | case CHIP_TURKS: |
1907 | rdev->config.evergreen.num_ses = 1; | 1768 | rdev->config.evergreen.num_ses = 1; |
@@ -1923,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1923 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; | 1784 | rdev->config.evergreen.sc_prim_fifo_size = 0x100; |
1924 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1785 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1925 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1786 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1787 | gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN; | ||
1926 | break; | 1788 | break; |
1927 | case CHIP_CAICOS: | 1789 | case CHIP_CAICOS: |
1928 | rdev->config.evergreen.num_ses = 1; | 1790 | rdev->config.evergreen.num_ses = 1; |
@@ -1944,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1944 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; | 1806 | rdev->config.evergreen.sc_prim_fifo_size = 0x40; |
1945 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; | 1807 | rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; |
1946 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; | 1808 | rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; |
1809 | gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN; | ||
1947 | break; | 1810 | break; |
1948 | } | 1811 | } |
1949 | 1812 | ||
@@ -1960,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1960 | 1823 | ||
1961 | evergreen_fix_pci_max_read_req_size(rdev); | 1824 | evergreen_fix_pci_max_read_req_size(rdev); |
1962 | 1825 | ||
1963 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2; | ||
1964 | |||
1965 | cc_gc_shader_pipe_config |= | ||
1966 | INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes) | ||
1967 | & EVERGREEN_MAX_PIPES_MASK); | ||
1968 | cc_gc_shader_pipe_config |= | ||
1969 | INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds) | ||
1970 | & EVERGREEN_MAX_SIMDS_MASK); | ||
1971 | |||
1972 | cc_rb_backend_disable = | ||
1973 | BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends) | ||
1974 | & EVERGREEN_MAX_BACKENDS_MASK); | ||
1975 | |||
1976 | |||
1977 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | 1826 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); |
1978 | if ((rdev->family == CHIP_PALM) || | 1827 | if ((rdev->family == CHIP_PALM) || |
1979 | (rdev->family == CHIP_SUMO) || | 1828 | (rdev->family == CHIP_SUMO) || |
@@ -1982,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
1982 | else | 1831 | else |
1983 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 1832 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); |
1984 | 1833 | ||
1985 | switch (rdev->config.evergreen.max_tile_pipes) { | ||
1986 | case 1: | ||
1987 | default: | ||
1988 | gb_addr_config |= NUM_PIPES(0); | ||
1989 | break; | ||
1990 | case 2: | ||
1991 | gb_addr_config |= NUM_PIPES(1); | ||
1992 | break; | ||
1993 | case 4: | ||
1994 | gb_addr_config |= NUM_PIPES(2); | ||
1995 | break; | ||
1996 | case 8: | ||
1997 | gb_addr_config |= NUM_PIPES(3); | ||
1998 | break; | ||
1999 | } | ||
2000 | |||
2001 | gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); | ||
2002 | gb_addr_config |= BANK_INTERLEAVE_SIZE(0); | ||
2003 | gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1); | ||
2004 | gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1); | ||
2005 | gb_addr_config |= NUM_GPUS(0); /* Hemlock? */ | ||
2006 | gb_addr_config |= MULTI_GPU_TILE_SIZE(2); | ||
2007 | |||
2008 | if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2) | ||
2009 | gb_addr_config |= ROW_SIZE(2); | ||
2010 | else | ||
2011 | gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT); | ||
2012 | |||
2013 | if (rdev->ddev->pdev->device == 0x689e) { | ||
2014 | u32 efuse_straps_4; | ||
2015 | u32 efuse_straps_3; | ||
2016 | u8 efuse_box_bit_131_124; | ||
2017 | |||
2018 | WREG32(RCU_IND_INDEX, 0x204); | ||
2019 | efuse_straps_4 = RREG32(RCU_IND_DATA); | ||
2020 | WREG32(RCU_IND_INDEX, 0x203); | ||
2021 | efuse_straps_3 = RREG32(RCU_IND_DATA); | ||
2022 | efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28)); | ||
2023 | |||
2024 | switch(efuse_box_bit_131_124) { | ||
2025 | case 0x00: | ||
2026 | gb_backend_map = 0x76543210; | ||
2027 | break; | ||
2028 | case 0x55: | ||
2029 | gb_backend_map = 0x77553311; | ||
2030 | break; | ||
2031 | case 0x56: | ||
2032 | gb_backend_map = 0x77553300; | ||
2033 | break; | ||
2034 | case 0x59: | ||
2035 | gb_backend_map = 0x77552211; | ||
2036 | break; | ||
2037 | case 0x66: | ||
2038 | gb_backend_map = 0x77443300; | ||
2039 | break; | ||
2040 | case 0x99: | ||
2041 | gb_backend_map = 0x66552211; | ||
2042 | break; | ||
2043 | case 0x5a: | ||
2044 | gb_backend_map = 0x77552200; | ||
2045 | break; | ||
2046 | case 0xaa: | ||
2047 | gb_backend_map = 0x66442200; | ||
2048 | break; | ||
2049 | case 0x95: | ||
2050 | gb_backend_map = 0x66553311; | ||
2051 | break; | ||
2052 | default: | ||
2053 | DRM_ERROR("bad backend map, using default\n"); | ||
2054 | gb_backend_map = | ||
2055 | evergreen_get_tile_pipe_to_backend_map(rdev, | ||
2056 | rdev->config.evergreen.max_tile_pipes, | ||
2057 | rdev->config.evergreen.max_backends, | ||
2058 | ((EVERGREEN_MAX_BACKENDS_MASK << | ||
2059 | rdev->config.evergreen.max_backends) & | ||
2060 | EVERGREEN_MAX_BACKENDS_MASK)); | ||
2061 | break; | ||
2062 | } | ||
2063 | } else if (rdev->ddev->pdev->device == 0x68b9) { | ||
2064 | u32 efuse_straps_3; | ||
2065 | u8 efuse_box_bit_127_124; | ||
2066 | |||
2067 | WREG32(RCU_IND_INDEX, 0x203); | ||
2068 | efuse_straps_3 = RREG32(RCU_IND_DATA); | ||
2069 | efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28); | ||
2070 | |||
2071 | switch(efuse_box_bit_127_124) { | ||
2072 | case 0x0: | ||
2073 | gb_backend_map = 0x00003210; | ||
2074 | break; | ||
2075 | case 0x5: | ||
2076 | case 0x6: | ||
2077 | case 0x9: | ||
2078 | case 0xa: | ||
2079 | gb_backend_map = 0x00003311; | ||
2080 | break; | ||
2081 | default: | ||
2082 | DRM_ERROR("bad backend map, using default\n"); | ||
2083 | gb_backend_map = | ||
2084 | evergreen_get_tile_pipe_to_backend_map(rdev, | ||
2085 | rdev->config.evergreen.max_tile_pipes, | ||
2086 | rdev->config.evergreen.max_backends, | ||
2087 | ((EVERGREEN_MAX_BACKENDS_MASK << | ||
2088 | rdev->config.evergreen.max_backends) & | ||
2089 | EVERGREEN_MAX_BACKENDS_MASK)); | ||
2090 | break; | ||
2091 | } | ||
2092 | } else { | ||
2093 | switch (rdev->family) { | ||
2094 | case CHIP_CYPRESS: | ||
2095 | case CHIP_HEMLOCK: | ||
2096 | case CHIP_BARTS: | ||
2097 | gb_backend_map = 0x66442200; | ||
2098 | break; | ||
2099 | case CHIP_JUNIPER: | ||
2100 | gb_backend_map = 0x00002200; | ||
2101 | break; | ||
2102 | default: | ||
2103 | gb_backend_map = | ||
2104 | evergreen_get_tile_pipe_to_backend_map(rdev, | ||
2105 | rdev->config.evergreen.max_tile_pipes, | ||
2106 | rdev->config.evergreen.max_backends, | ||
2107 | ((EVERGREEN_MAX_BACKENDS_MASK << | ||
2108 | rdev->config.evergreen.max_backends) & | ||
2109 | EVERGREEN_MAX_BACKENDS_MASK)); | ||
2110 | } | ||
2111 | } | ||
2112 | |||
2113 | /* setup tiling info dword. gb_addr_config is not adequate since it does | 1834 | /* setup tiling info dword. gb_addr_config is not adequate since it does |
2114 | * not have bank info, so create a custom tiling dword. | 1835 | * not have bank info, so create a custom tiling dword. |
2115 | * bits 3:0 num_pipes | 1836 | * bits 3:0 num_pipes |
@@ -2136,45 +1857,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
2136 | /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ | 1857 | /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ |
2137 | if (rdev->flags & RADEON_IS_IGP) | 1858 | if (rdev->flags & RADEON_IS_IGP) |
2138 | rdev->config.evergreen.tile_config |= 1 << 4; | 1859 | rdev->config.evergreen.tile_config |= 1 << 4; |
2139 | else | 1860 | else { |
2140 | rdev->config.evergreen.tile_config |= | 1861 | if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) |
2141 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; | 1862 | rdev->config.evergreen.tile_config |= 1 << 4; |
2142 | rdev->config.evergreen.tile_config |= | 1863 | else |
2143 | ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; | 1864 | rdev->config.evergreen.tile_config |= 0 << 4; |
1865 | } | ||
1866 | rdev->config.evergreen.tile_config |= 0 << 8; | ||
2144 | rdev->config.evergreen.tile_config |= | 1867 | rdev->config.evergreen.tile_config |= |
2145 | ((gb_addr_config & 0x30000000) >> 28) << 12; | 1868 | ((gb_addr_config & 0x30000000) >> 28) << 12; |
2146 | 1869 | ||
2147 | rdev->config.evergreen.backend_map = gb_backend_map; | 1870 | num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1; |
2148 | WREG32(GB_BACKEND_MAP, gb_backend_map); | ||
2149 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | ||
2150 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | ||
2151 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | ||
2152 | |||
2153 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; | ||
2154 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; | ||
2155 | 1871 | ||
2156 | for (i = 0; i < rdev->config.evergreen.num_ses; i++) { | 1872 | if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) { |
2157 | u32 rb = cc_rb_backend_disable | (0xf0 << 16); | 1873 | u32 efuse_straps_4; |
2158 | u32 sp = cc_gc_shader_pipe_config; | 1874 | u32 efuse_straps_3; |
2159 | u32 gfx = grbm_gfx_index | SE_INDEX(i); | ||
2160 | 1875 | ||
2161 | if (i == num_shader_engines) { | 1876 | WREG32(RCU_IND_INDEX, 0x204); |
2162 | rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); | 1877 | efuse_straps_4 = RREG32(RCU_IND_DATA); |
2163 | sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); | 1878 | WREG32(RCU_IND_INDEX, 0x203); |
1879 | efuse_straps_3 = RREG32(RCU_IND_DATA); | ||
1880 | tmp = (((efuse_straps_4 & 0xf) << 4) | | ||
1881 | ((efuse_straps_3 & 0xf0000000) >> 28)); | ||
1882 | } else { | ||
1883 | tmp = 0; | ||
1884 | for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) { | ||
1885 | u32 rb_disable_bitmap; | ||
1886 | |||
1887 | WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); | ||
1888 | WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); | ||
1889 | rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16; | ||
1890 | tmp <<= 4; | ||
1891 | tmp |= rb_disable_bitmap; | ||
2164 | } | 1892 | } |
1893 | } | ||
1894 | /* enabled rb are just the one not disabled :) */ | ||
1895 | disabled_rb_mask = tmp; | ||
2165 | 1896 | ||
2166 | WREG32(GRBM_GFX_INDEX, gfx); | 1897 | WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); |
2167 | WREG32(RLC_GFX_INDEX, gfx); | 1898 | WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); |
2168 | 1899 | ||
2169 | WREG32(CC_RB_BACKEND_DISABLE, rb); | 1900 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
2170 | WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); | 1901 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
2171 | WREG32(GC_USER_RB_BACKEND_DISABLE, rb); | 1902 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
2172 | WREG32(CC_GC_SHADER_PIPE_CONFIG, sp); | ||
2173 | } | ||
2174 | 1903 | ||
2175 | grbm_gfx_index |= SE_BROADCAST_WRITES; | 1904 | tmp = gb_addr_config & NUM_PIPES_MASK; |
2176 | WREG32(GRBM_GFX_INDEX, grbm_gfx_index); | 1905 | tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, |
2177 | WREG32(RLC_GFX_INDEX, grbm_gfx_index); | 1906 | EVERGREEN_MAX_BACKENDS, disabled_rb_mask); |
1907 | WREG32(GB_BACKEND_MAP, tmp); | ||
2178 | 1908 | ||
2179 | WREG32(CGTS_SYS_TCC_DISABLE, 0); | 1909 | WREG32(CGTS_SYS_TCC_DISABLE, 0); |
2180 | WREG32(CGTS_TCC_DISABLE, 0); | 1910 | WREG32(CGTS_TCC_DISABLE, 0); |
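The replacement code above no longer guesses the render-backend layout from per-chip tables or efuse decodes; it selects each shader engine through GRBM_GFX_INDEX/RLC_GFX_INDEX, reads back which backends that SE actually has disabled (bits 23:16 of CC_RB_BACKEND_DISABLE), and packs the per-SE bitmaps, four bits per SE, into a single disabled_rb_mask that r6xx_remap_render_backend() turns into GB_BACKEND_MAP. A small host-side sketch of just the packing step, with the register reads replaced by a fake per-SE array:

```c
#include <stdio.h>

/*
 * Pack per-shader-engine render-backend disable bits the way the new
 * evergreen_gpu_init()/cayman_gpu_init() loops do: walk the SEs from
 * last to first, shift the accumulator by one nibble and OR in the
 * SE's bitmap, so SE0 ends up in the lowest nibble.  (The real code
 * reads each bitmap from CC_RB_BACKEND_DISABLE.)
 */
static unsigned int pack_disabled_rb_mask(const unsigned int *rb_disable_per_se,
					  int num_ses)
{
	unsigned int tmp = 0;
	int i;

	for (i = num_ses - 1; i >= 0; i--) {
		tmp <<= 4;
		tmp |= rb_disable_per_se[i] & 0xf;
	}
	return tmp;
}

int main(void)
{
	/* Fake readback for a two-SE part: SE0 has RB1 fused off, SE1 is clean. */
	unsigned int per_se[2] = { 0x2, 0x0 };

	printf("disabled_rb_mask = 0x%x\n", pack_disabled_rb_mask(per_se, 2));
	return 0;
}
```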
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79130bfd1d6f..2773039b4902 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -37,6 +37,15 @@
 #define EVERGREEN_MAX_PIPES_MASK	0xFF
 #define EVERGREEN_MAX_LDS_NUM		0xFFFF
 
+#define CYPRESS_GB_ADDR_CONFIG_GOLDEN	0x02011003
+#define BARTS_GB_ADDR_CONFIG_GOLDEN	0x02011003
+#define CAYMAN_GB_ADDR_CONFIG_GOLDEN	0x02011003
+#define JUNIPER_GB_ADDR_CONFIG_GOLDEN	0x02010002
+#define REDWOOD_GB_ADDR_CONFIG_GOLDEN	0x02010002
+#define TURKS_GB_ADDR_CONFIG_GOLDEN	0x02010002
+#define CEDAR_GB_ADDR_CONFIG_GOLDEN	0x02010001
+#define CAICOS_GB_ADDR_CONFIG_GOLDEN	0x02010001
+
 /* Registers */
 
 #define RCU_IND_INDEX			0x100
@@ -54,6 +63,7 @@
 #define		BACKEND_DISABLE(x)		((x) << 16)
 #define	GB_ADDR_CONFIG			0x98F8
 #define		NUM_PIPES(x)			((x) << 0)
+#define		NUM_PIPES_MASK			0x0000000f
 #define		PIPE_INTERLEAVE_SIZE(x)		((x) << 4)
 #define		BANK_INTERLEAVE_SIZE(x)		((x) << 8)
 #define		NUM_SHADER_ENGINES(x)		((x) << 12)
@@ -452,6 +462,7 @@
 #define	MC_VM_MD_L1_TLB0_CNTL		0x2654
 #define	MC_VM_MD_L1_TLB1_CNTL		0x2658
 #define	MC_VM_MD_L1_TLB2_CNTL		0x265C
+#define	MC_VM_MD_L1_TLB3_CNTL		0x2698
 
 #define	FUS_MC_VM_MD_L1_TLB0_CNTL	0x265C
 #define	FUS_MC_VM_MD_L1_TLB1_CNTL	0x2660
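The *_GB_ADDR_CONFIG_GOLDEN values added above replace the code that used to assemble GB_ADDR_CONFIG field by field: the driver now writes the known-good word and decodes what it needs back out of it. The low nibble (covered by the new NUM_PIPES_MASK) stores log2 of the tile-pipe count, which the init paths recover as 1 << field. A tiny standalone decode of just that field for the golden values above; the helper name is made up for illustration:

```c
#include <stdio.h>

/* From evergreend.h: NUM_PIPES occupies bits 3:0 of GB_ADDR_CONFIG and
 * encodes log2 of the tile-pipe count (NUM_PIPES(3) means 8 pipes). */
#define NUM_PIPES_MASK	0x0000000f

static unsigned int gb_addr_config_num_pipes(unsigned int gb_addr_config)
{
	return 1u << (gb_addr_config & NUM_PIPES_MASK);
}

int main(void)
{
	printf("CYPRESS/BARTS 0x02011003 -> %u pipes\n",
	       gb_addr_config_num_pipes(0x02011003));
	printf("JUNIPER/TURKS 0x02010002 -> %u pipes\n",
	       gb_addr_config_num_pipes(0x02010002));
	printf("CEDAR/CAICOS  0x02010001 -> %u pipes\n",
	       gb_addr_config_num_pipes(0x02010001));
	return 0;
}
```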
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ce4e7cc6c905..3df4efa11942 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -417,215 +417,17 @@ out: | |||
417 | /* | 417 | /* |
418 | * Core functions | 418 | * Core functions |
419 | */ | 419 | */ |
420 | static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | ||
421 | u32 num_tile_pipes, | ||
422 | u32 num_backends_per_asic, | ||
423 | u32 *backend_disable_mask_per_asic, | ||
424 | u32 num_shader_engines) | ||
425 | { | ||
426 | u32 backend_map = 0; | ||
427 | u32 enabled_backends_mask = 0; | ||
428 | u32 enabled_backends_count = 0; | ||
429 | u32 num_backends_per_se; | ||
430 | u32 cur_pipe; | ||
431 | u32 swizzle_pipe[CAYMAN_MAX_PIPES]; | ||
432 | u32 cur_backend = 0; | ||
433 | u32 i; | ||
434 | bool force_no_swizzle; | ||
435 | |||
436 | /* force legal values */ | ||
437 | if (num_tile_pipes < 1) | ||
438 | num_tile_pipes = 1; | ||
439 | if (num_tile_pipes > rdev->config.cayman.max_tile_pipes) | ||
440 | num_tile_pipes = rdev->config.cayman.max_tile_pipes; | ||
441 | if (num_shader_engines < 1) | ||
442 | num_shader_engines = 1; | ||
443 | if (num_shader_engines > rdev->config.cayman.max_shader_engines) | ||
444 | num_shader_engines = rdev->config.cayman.max_shader_engines; | ||
445 | if (num_backends_per_asic < num_shader_engines) | ||
446 | num_backends_per_asic = num_shader_engines; | ||
447 | if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines)) | ||
448 | num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines; | ||
449 | |||
450 | /* make sure we have the same number of backends per se */ | ||
451 | num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines); | ||
452 | /* set up the number of backends per se */ | ||
453 | num_backends_per_se = num_backends_per_asic / num_shader_engines; | ||
454 | if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) { | ||
455 | num_backends_per_se = rdev->config.cayman.max_backends_per_se; | ||
456 | num_backends_per_asic = num_backends_per_se * num_shader_engines; | ||
457 | } | ||
458 | |||
459 | /* create enable mask and count for enabled backends */ | ||
460 | for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) { | ||
461 | if (((*backend_disable_mask_per_asic >> i) & 1) == 0) { | ||
462 | enabled_backends_mask |= (1 << i); | ||
463 | ++enabled_backends_count; | ||
464 | } | ||
465 | if (enabled_backends_count == num_backends_per_asic) | ||
466 | break; | ||
467 | } | ||
468 | |||
469 | /* force the backends mask to match the current number of backends */ | ||
470 | if (enabled_backends_count != num_backends_per_asic) { | ||
471 | u32 this_backend_enabled; | ||
472 | u32 shader_engine; | ||
473 | u32 backend_per_se; | ||
474 | |||
475 | enabled_backends_mask = 0; | ||
476 | enabled_backends_count = 0; | ||
477 | *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK; | ||
478 | for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) { | ||
479 | /* calc the current se */ | ||
480 | shader_engine = i / rdev->config.cayman.max_backends_per_se; | ||
481 | /* calc the backend per se */ | ||
482 | backend_per_se = i % rdev->config.cayman.max_backends_per_se; | ||
483 | /* default to not enabled */ | ||
484 | this_backend_enabled = 0; | ||
485 | if ((shader_engine < num_shader_engines) && | ||
486 | (backend_per_se < num_backends_per_se)) | ||
487 | this_backend_enabled = 1; | ||
488 | if (this_backend_enabled) { | ||
489 | enabled_backends_mask |= (1 << i); | ||
490 | *backend_disable_mask_per_asic &= ~(1 << i); | ||
491 | ++enabled_backends_count; | ||
492 | } | ||
493 | } | ||
494 | } | ||
495 | |||
496 | |||
497 | memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES); | ||
498 | switch (rdev->family) { | ||
499 | case CHIP_CAYMAN: | ||
500 | case CHIP_ARUBA: | ||
501 | force_no_swizzle = true; | ||
502 | break; | ||
503 | default: | ||
504 | force_no_swizzle = false; | ||
505 | break; | ||
506 | } | ||
507 | if (force_no_swizzle) { | ||
508 | bool last_backend_enabled = false; | ||
509 | |||
510 | force_no_swizzle = false; | ||
511 | for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) { | ||
512 | if (((enabled_backends_mask >> i) & 1) == 1) { | ||
513 | if (last_backend_enabled) | ||
514 | force_no_swizzle = true; | ||
515 | last_backend_enabled = true; | ||
516 | } else | ||
517 | last_backend_enabled = false; | ||
518 | } | ||
519 | } | ||
520 | |||
521 | switch (num_tile_pipes) { | ||
522 | case 1: | ||
523 | case 3: | ||
524 | case 5: | ||
525 | case 7: | ||
526 | DRM_ERROR("odd number of pipes!\n"); | ||
527 | break; | ||
528 | case 2: | ||
529 | swizzle_pipe[0] = 0; | ||
530 | swizzle_pipe[1] = 1; | ||
531 | break; | ||
532 | case 4: | ||
533 | if (force_no_swizzle) { | ||
534 | swizzle_pipe[0] = 0; | ||
535 | swizzle_pipe[1] = 1; | ||
536 | swizzle_pipe[2] = 2; | ||
537 | swizzle_pipe[3] = 3; | ||
538 | } else { | ||
539 | swizzle_pipe[0] = 0; | ||
540 | swizzle_pipe[1] = 2; | ||
541 | swizzle_pipe[2] = 1; | ||
542 | swizzle_pipe[3] = 3; | ||
543 | } | ||
544 | break; | ||
545 | case 6: | ||
546 | if (force_no_swizzle) { | ||
547 | swizzle_pipe[0] = 0; | ||
548 | swizzle_pipe[1] = 1; | ||
549 | swizzle_pipe[2] = 2; | ||
550 | swizzle_pipe[3] = 3; | ||
551 | swizzle_pipe[4] = 4; | ||
552 | swizzle_pipe[5] = 5; | ||
553 | } else { | ||
554 | swizzle_pipe[0] = 0; | ||
555 | swizzle_pipe[1] = 2; | ||
556 | swizzle_pipe[2] = 4; | ||
557 | swizzle_pipe[3] = 1; | ||
558 | swizzle_pipe[4] = 3; | ||
559 | swizzle_pipe[5] = 5; | ||
560 | } | ||
561 | break; | ||
562 | case 8: | ||
563 | if (force_no_swizzle) { | ||
564 | swizzle_pipe[0] = 0; | ||
565 | swizzle_pipe[1] = 1; | ||
566 | swizzle_pipe[2] = 2; | ||
567 | swizzle_pipe[3] = 3; | ||
568 | swizzle_pipe[4] = 4; | ||
569 | swizzle_pipe[5] = 5; | ||
570 | swizzle_pipe[6] = 6; | ||
571 | swizzle_pipe[7] = 7; | ||
572 | } else { | ||
573 | swizzle_pipe[0] = 0; | ||
574 | swizzle_pipe[1] = 2; | ||
575 | swizzle_pipe[2] = 4; | ||
576 | swizzle_pipe[3] = 6; | ||
577 | swizzle_pipe[4] = 1; | ||
578 | swizzle_pipe[5] = 3; | ||
579 | swizzle_pipe[6] = 5; | ||
580 | swizzle_pipe[7] = 7; | ||
581 | } | ||
582 | break; | ||
583 | } | ||
584 | |||
585 | for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { | ||
586 | while (((1 << cur_backend) & enabled_backends_mask) == 0) | ||
587 | cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS; | ||
588 | |||
589 | backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4))); | ||
590 | |||
591 | cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS; | ||
592 | } | ||
593 | |||
594 | return backend_map; | ||
595 | } | ||
596 | |||
597 | static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, | ||
598 | u32 disable_mask_per_se, | ||
599 | u32 max_disable_mask_per_se, | ||
600 | u32 num_shader_engines) | ||
601 | { | ||
602 | u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se); | ||
603 | u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se; | ||
604 | |||
605 | if (num_shader_engines == 1) | ||
606 | return disable_mask_per_asic; | ||
607 | else if (num_shader_engines == 2) | ||
608 | return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se); | ||
609 | else | ||
610 | return 0xffffffff; | ||
611 | } | ||
612 | |||
613 | static void cayman_gpu_init(struct radeon_device *rdev) | 420 | static void cayman_gpu_init(struct radeon_device *rdev) |
614 | { | 421 | { |
615 | u32 cc_rb_backend_disable = 0; | ||
616 | u32 cc_gc_shader_pipe_config; | ||
617 | u32 gb_addr_config = 0; | 422 | u32 gb_addr_config = 0; |
618 | u32 mc_shared_chmap, mc_arb_ramcfg; | 423 | u32 mc_shared_chmap, mc_arb_ramcfg; |
619 | u32 gb_backend_map; | ||
620 | u32 cgts_tcc_disable; | 424 | u32 cgts_tcc_disable; |
621 | u32 sx_debug_1; | 425 | u32 sx_debug_1; |
622 | u32 smx_dc_ctl0; | 426 | u32 smx_dc_ctl0; |
623 | u32 gc_user_shader_pipe_config; | ||
624 | u32 gc_user_rb_backend_disable; | ||
625 | u32 cgts_user_tcc_disable; | ||
626 | u32 cgts_sm_ctrl_reg; | 427 | u32 cgts_sm_ctrl_reg; |
627 | u32 hdp_host_path_cntl; | 428 | u32 hdp_host_path_cntl; |
628 | u32 tmp; | 429 | u32 tmp; |
430 | u32 disabled_rb_mask; | ||
629 | int i, j; | 431 | int i, j; |
630 | 432 | ||
631 | switch (rdev->family) { | 433 | switch (rdev->family) { |
@@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
650 | rdev->config.cayman.sc_prim_fifo_size = 0x100; | 452 | rdev->config.cayman.sc_prim_fifo_size = 0x100; |
651 | rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; | 453 | rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; |
652 | rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; | 454 | rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; |
455 | gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN; | ||
653 | break; | 456 | break; |
654 | case CHIP_ARUBA: | 457 | case CHIP_ARUBA: |
655 | default: | 458 | default: |
@@ -687,6 +490,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
687 | rdev->config.cayman.sc_prim_fifo_size = 0x40; | 490 | rdev->config.cayman.sc_prim_fifo_size = 0x40; |
688 | rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; | 491 | rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; |
689 | rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; | 492 | rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; |
493 | gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN; | ||
690 | break; | 494 | break; |
691 | } | 495 | } |
692 | 496 | ||
@@ -706,39 +510,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
706 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); | 510 | mc_shared_chmap = RREG32(MC_SHARED_CHMAP); |
707 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 511 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); |
708 | 512 | ||
709 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); | ||
710 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); | ||
711 | cgts_tcc_disable = 0xffff0000; | ||
712 | for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++) | ||
713 | cgts_tcc_disable &= ~(1 << (16 + i)); | ||
714 | gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); | ||
715 | gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); | ||
716 | cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); | ||
717 | |||
718 | rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines; | ||
719 | tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT; | ||
720 | rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp); | ||
721 | rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes; | ||
722 | tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT; | ||
723 | rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp); | ||
724 | tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; | ||
725 | rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp); | ||
726 | tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT; | ||
727 | rdev->config.cayman.backend_disable_mask_per_asic = | ||
728 | cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK, | ||
729 | rdev->config.cayman.num_shader_engines); | ||
730 | rdev->config.cayman.backend_map = | ||
731 | cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes, | ||
732 | rdev->config.cayman.num_backends_per_se * | ||
733 | rdev->config.cayman.num_shader_engines, | ||
734 | &rdev->config.cayman.backend_disable_mask_per_asic, | ||
735 | rdev->config.cayman.num_shader_engines); | ||
736 | tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT; | ||
737 | rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp); | ||
738 | tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT; | ||
739 | rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256; | ||
740 | if (rdev->config.cayman.mem_max_burst_length_bytes > 512) | ||
741 | rdev->config.cayman.mem_max_burst_length_bytes = 512; | ||
742 | tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; | 513 | tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; |
743 | rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; | 514 | rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; |
744 | if (rdev->config.cayman.mem_row_size_in_kb > 4) | 515 | if (rdev->config.cayman.mem_row_size_in_kb > 4) |
@@ -748,73 +519,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
748 | rdev->config.cayman.num_gpus = 1; | 519 | rdev->config.cayman.num_gpus = 1; |
749 | rdev->config.cayman.multi_gpu_tile_size = 64; | 520 | rdev->config.cayman.multi_gpu_tile_size = 64; |
750 | 521 | ||
751 | //gb_addr_config = 0x02011003 | ||
752 | #if 0 | ||
753 | gb_addr_config = RREG32(GB_ADDR_CONFIG); | ||
754 | #else | ||
755 | gb_addr_config = 0; | ||
756 | switch (rdev->config.cayman.num_tile_pipes) { | ||
757 | case 1: | ||
758 | default: | ||
759 | gb_addr_config |= NUM_PIPES(0); | ||
760 | break; | ||
761 | case 2: | ||
762 | gb_addr_config |= NUM_PIPES(1); | ||
763 | break; | ||
764 | case 4: | ||
765 | gb_addr_config |= NUM_PIPES(2); | ||
766 | break; | ||
767 | case 8: | ||
768 | gb_addr_config |= NUM_PIPES(3); | ||
769 | break; | ||
770 | } | ||
771 | |||
772 | tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1; | ||
773 | gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp); | ||
774 | gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1); | ||
775 | tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1; | ||
776 | gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp); | ||
777 | switch (rdev->config.cayman.num_gpus) { | ||
778 | case 1: | ||
779 | default: | ||
780 | gb_addr_config |= NUM_GPUS(0); | ||
781 | break; | ||
782 | case 2: | ||
783 | gb_addr_config |= NUM_GPUS(1); | ||
784 | break; | ||
785 | case 4: | ||
786 | gb_addr_config |= NUM_GPUS(2); | ||
787 | break; | ||
788 | } | ||
789 | switch (rdev->config.cayman.multi_gpu_tile_size) { | ||
790 | case 16: | ||
791 | gb_addr_config |= MULTI_GPU_TILE_SIZE(0); | ||
792 | break; | ||
793 | case 32: | ||
794 | default: | ||
795 | gb_addr_config |= MULTI_GPU_TILE_SIZE(1); | ||
796 | break; | ||
797 | case 64: | ||
798 | gb_addr_config |= MULTI_GPU_TILE_SIZE(2); | ||
799 | break; | ||
800 | case 128: | ||
801 | gb_addr_config |= MULTI_GPU_TILE_SIZE(3); | ||
802 | break; | ||
803 | } | ||
804 | switch (rdev->config.cayman.mem_row_size_in_kb) { | ||
805 | case 1: | ||
806 | default: | ||
807 | gb_addr_config |= ROW_SIZE(0); | ||
808 | break; | ||
809 | case 2: | ||
810 | gb_addr_config |= ROW_SIZE(1); | ||
811 | break; | ||
812 | case 4: | ||
813 | gb_addr_config |= ROW_SIZE(2); | ||
814 | break; | ||
815 | } | ||
816 | #endif | ||
817 | |||
818 | tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; | 522 | tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; |
819 | rdev->config.cayman.num_tile_pipes = (1 << tmp); | 523 | rdev->config.cayman.num_tile_pipes = (1 << tmp); |
820 | tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; | 524 | tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; |
@@ -828,17 +532,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
828 | tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; | 532 | tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; |
829 | rdev->config.cayman.mem_row_size_in_kb = 1 << tmp; | 533 | rdev->config.cayman.mem_row_size_in_kb = 1 << tmp; |
830 | 534 | ||
831 | //gb_backend_map = 0x76541032; | 535 | |
832 | #if 0 | ||
833 | gb_backend_map = RREG32(GB_BACKEND_MAP); | ||
834 | #else | ||
835 | gb_backend_map = | ||
836 | cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes, | ||
837 | rdev->config.cayman.num_backends_per_se * | ||
838 | rdev->config.cayman.num_shader_engines, | ||
839 | &rdev->config.cayman.backend_disable_mask_per_asic, | ||
840 | rdev->config.cayman.num_shader_engines); | ||
841 | #endif | ||
842 | /* setup tiling info dword. gb_addr_config is not adequate since it does | 536 | /* setup tiling info dword. gb_addr_config is not adequate since it does |
843 | * not have bank info, so create a custom tiling dword. | 537 | * not have bank info, so create a custom tiling dword. |
844 | * bits 3:0 num_pipes | 538 | * bits 3:0 num_pipes |
@@ -866,33 +560,49 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
866 | /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ | 560 | /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ |
867 | if (rdev->flags & RADEON_IS_IGP) | 561 | if (rdev->flags & RADEON_IS_IGP) |
868 | rdev->config.cayman.tile_config |= 1 << 4; | 562 | rdev->config.cayman.tile_config |= 1 << 4; |
869 | else | 563 | else { |
870 | rdev->config.cayman.tile_config |= | 564 | if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) |
871 | ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; | 565 | rdev->config.cayman.tile_config |= 1 << 4; |
566 | else | ||
567 | rdev->config.cayman.tile_config |= 0 << 4; | ||
568 | } | ||
872 | rdev->config.cayman.tile_config |= | 569 | rdev->config.cayman.tile_config |= |
873 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; | 570 | ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; |
874 | rdev->config.cayman.tile_config |= | 571 | rdev->config.cayman.tile_config |= |
875 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; | 572 | ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; |
876 | 573 | ||
877 | rdev->config.cayman.backend_map = gb_backend_map; | 574 | tmp = 0; |
878 | WREG32(GB_BACKEND_MAP, gb_backend_map); | 575 | for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) { |
576 | u32 rb_disable_bitmap; | ||
577 | |||
578 | WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); | ||
579 | WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i)); | ||
580 | rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16; | ||
581 | tmp <<= 4; | ||
582 | tmp |= rb_disable_bitmap; | ||
583 | } | ||
584 | /* enabled rb are just the one not disabled :) */ | ||
585 | disabled_rb_mask = tmp; | ||
586 | |||
587 | WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); | ||
588 | WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES); | ||
589 | |||
879 | WREG32(GB_ADDR_CONFIG, gb_addr_config); | 590 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
880 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 591 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
881 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 592 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
882 | 593 | ||
883 | /* primary versions */ | 594 | tmp = gb_addr_config & NUM_PIPES_MASK; |
884 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 595 | tmp = r6xx_remap_render_backend(rdev, tmp, |
885 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 596 | rdev->config.cayman.max_backends_per_se * |
886 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 597 | rdev->config.cayman.max_shader_engines, |
598 | CAYMAN_MAX_BACKENDS, disabled_rb_mask); | ||
599 | WREG32(GB_BACKEND_MAP, tmp); | ||
887 | 600 | ||
601 | cgts_tcc_disable = 0xffff0000; | ||
602 | for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++) | ||
603 | cgts_tcc_disable &= ~(1 << (16 + i)); | ||
888 | WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); | 604 | WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); |
889 | WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable); | 605 | WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable); |
890 | |||
891 | /* user versions */ | ||
892 | WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
893 | WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
894 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
895 | |||
896 | WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable); | 606 | WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable); |
897 | WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); | 607 | WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); |
898 | 608 | ||
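GB_BACKEND_MAP, which both the old and the new paths ultimately program, is a packed table with one 4-bit render-backend index per tile pipe, pipe 0 in the lowest nibble; that is why the hard-coded and efuse-derived maps above (such as 0x76543210 in the evergreen.c hunk) read as "pipe N drives RB N". A small sketch of that packing in the spirit of the removed *_get_tile_pipe_to_backend_map() helpers; the new r6xx_remap_render_backend() in r600.c below distributes backends differently, so this only illustrates the register layout:

```c
#include <stdio.h>

/*
 * Build a GB_BACKEND_MAP-style word: give each tile pipe the next render
 * backend that is not disabled, wrapping around and reusing backends when
 * there are more pipes than enabled RBs.  Assumes at least one enabled RB.
 */
static unsigned int pack_backend_map(unsigned int num_pipes,
				     unsigned int max_backends,
				     unsigned int disabled_mask)
{
	unsigned int map = 0, rb = 0, pipe;

	for (pipe = 0; pipe < num_pipes; pipe++) {
		/* skip render backends that are fused off / disabled */
		while (disabled_mask & (1u << rb))
			rb = (rb + 1) % max_backends;
		map |= (rb & 0xf) << (pipe * 4);	/* one nibble per pipe */
		rb = (rb + 1) % max_backends;
	}
	return map;
}

int main(void)
{
	/* 8 pipes, 8 RBs, nothing disabled: the identity map 0x76543210. */
	printf("0x%08x\n", pack_backend_map(8, 8, 0x00));
	/* Same part with RB1 and RB5 fused off: they are skipped, others reused. */
	printf("0x%08x\n", pack_backend_map(8, 8, 0x22));
	return 0;
}
```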
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index 2aa7046ada56..a0b98066e207 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h | |||
@@ -41,6 +41,9 @@ | |||
41 | #define CAYMAN_MAX_TCC 16 | 41 | #define CAYMAN_MAX_TCC 16 |
42 | #define CAYMAN_MAX_TCC_MASK 0xFF | 42 | #define CAYMAN_MAX_TCC_MASK 0xFF |
43 | 43 | ||
44 | #define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003 | ||
45 | #define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001 | ||
46 | |||
44 | #define DMIF_ADDR_CONFIG 0xBD4 | 47 | #define DMIF_ADDR_CONFIG 0xBD4 |
45 | #define SRBM_GFX_CNTL 0x0E44 | 48 | #define SRBM_GFX_CNTL 0x0E44 |
46 | #define RINGID(x) (((x) & 0x3) << 0) | 49 | #define RINGID(x) (((x) & 0x3) << 0) |
@@ -148,6 +151,8 @@ | |||
148 | #define CGTS_SYS_TCC_DISABLE 0x3F90 | 151 | #define CGTS_SYS_TCC_DISABLE 0x3F90 |
149 | #define CGTS_USER_SYS_TCC_DISABLE 0x3F94 | 152 | #define CGTS_USER_SYS_TCC_DISABLE 0x3F94 |
150 | 153 | ||
154 | #define RLC_GFX_INDEX 0x3FC4 | ||
155 | |||
151 | #define CONFIG_MEMSIZE 0x5428 | 156 | #define CONFIG_MEMSIZE 0x5428 |
152 | 157 | ||
153 | #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 | 158 | #define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 |
@@ -212,6 +217,12 @@ | |||
212 | #define SOFT_RESET_VGT (1 << 14) | 217 | #define SOFT_RESET_VGT (1 << 14) |
213 | #define SOFT_RESET_IA (1 << 15) | 218 | #define SOFT_RESET_IA (1 << 15) |
214 | 219 | ||
220 | #define GRBM_GFX_INDEX 0x802C | ||
221 | #define INSTANCE_INDEX(x) ((x) << 0) | ||
222 | #define SE_INDEX(x) ((x) << 16) | ||
223 | #define INSTANCE_BROADCAST_WRITES (1 << 30) | ||
224 | #define SE_BROADCAST_WRITES (1 << 31) | ||
225 | |||
215 | #define SCRATCH_REG0 0x8500 | 226 | #define SCRATCH_REG0 0x8500 |
216 | #define SCRATCH_REG1 0x8504 | 227 | #define SCRATCH_REG1 0x8504 |
217 | #define SCRATCH_REG2 0x8508 | 228 | #define SCRATCH_REG2 0x8508 |
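
With these definitions the per-SE selects written by the Cayman code work out to concrete values: INSTANCE_BROADCAST_WRITES | SE_INDEX(1) is (1 << 30) | (1 << 16) = 0x40010000, and the broadcast restore, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES, is 0xc0000000.
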
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index f388a1d73b63..45cfcea63507 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev) | |||
1376 | return r600_gpu_soft_reset(rdev); | 1376 | return r600_gpu_soft_reset(rdev); |
1377 | } | 1377 | } |
1378 | 1378 | ||
1379 | static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes, | 1379 | u32 r6xx_remap_render_backend(struct radeon_device *rdev, |
1380 | u32 num_backends, | 1380 | u32 tiling_pipe_num, |
1381 | u32 backend_disable_mask) | 1381 | u32 max_rb_num, |
1382 | { | 1382 | u32 total_max_rb_num, |
1383 | u32 backend_map = 0; | 1383 | u32 disabled_rb_mask) |
1384 | u32 enabled_backends_mask; | 1384 | { |
1385 | u32 enabled_backends_count; | 1385 | u32 rendering_pipe_num, rb_num_width, req_rb_num; |
1386 | u32 cur_pipe; | 1386 | u32 pipe_rb_ratio, pipe_rb_remain; |
1387 | u32 swizzle_pipe[R6XX_MAX_PIPES]; | 1387 | u32 data = 0, mask = 1 << (max_rb_num - 1); |
1388 | u32 cur_backend; | 1388 | unsigned i, j; |
1389 | u32 i; | 1389 | |
1390 | 1390 | /* mask out the RBs that don't exist on that asic */ | |
1391 | if (num_tile_pipes > R6XX_MAX_PIPES) | 1391 | disabled_rb_mask |= (0xff << max_rb_num) & 0xff; |
1392 | num_tile_pipes = R6XX_MAX_PIPES; | 1392 | |
1393 | if (num_tile_pipes < 1) | 1393 | rendering_pipe_num = 1 << tiling_pipe_num; |
1394 | num_tile_pipes = 1; | 1394 | req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask); |
1395 | if (num_backends > R6XX_MAX_BACKENDS) | 1395 | BUG_ON(rendering_pipe_num < req_rb_num); |
1396 | num_backends = R6XX_MAX_BACKENDS; | 1396 | |
1397 | if (num_backends < 1) | 1397 | pipe_rb_ratio = rendering_pipe_num / req_rb_num; |
1398 | num_backends = 1; | 1398 | pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num; |
1399 | 1399 | ||
1400 | enabled_backends_mask = 0; | 1400 | if (rdev->family <= CHIP_RV740) { |
1401 | enabled_backends_count = 0; | 1401 | /* r6xx/r7xx */ |
1402 | for (i = 0; i < R6XX_MAX_BACKENDS; ++i) { | 1402 | rb_num_width = 2; |
1403 | if (((backend_disable_mask >> i) & 1) == 0) { | 1403 | } else { |
1404 | enabled_backends_mask |= (1 << i); | 1404 | /* eg+ */ |
1405 | ++enabled_backends_count; | 1405 | rb_num_width = 4; |
1406 | } | ||
1407 | if (enabled_backends_count == num_backends) | ||
1408 | break; | ||
1409 | } | ||
1410 | |||
1411 | if (enabled_backends_count == 0) { | ||
1412 | enabled_backends_mask = 1; | ||
1413 | enabled_backends_count = 1; | ||
1414 | } | ||
1415 | |||
1416 | if (enabled_backends_count != num_backends) | ||
1417 | num_backends = enabled_backends_count; | ||
1418 | |||
1419 | memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES); | ||
1420 | switch (num_tile_pipes) { | ||
1421 | case 1: | ||
1422 | swizzle_pipe[0] = 0; | ||
1423 | break; | ||
1424 | case 2: | ||
1425 | swizzle_pipe[0] = 0; | ||
1426 | swizzle_pipe[1] = 1; | ||
1427 | break; | ||
1428 | case 3: | ||
1429 | swizzle_pipe[0] = 0; | ||
1430 | swizzle_pipe[1] = 1; | ||
1431 | swizzle_pipe[2] = 2; | ||
1432 | break; | ||
1433 | case 4: | ||
1434 | swizzle_pipe[0] = 0; | ||
1435 | swizzle_pipe[1] = 1; | ||
1436 | swizzle_pipe[2] = 2; | ||
1437 | swizzle_pipe[3] = 3; | ||
1438 | break; | ||
1439 | case 5: | ||
1440 | swizzle_pipe[0] = 0; | ||
1441 | swizzle_pipe[1] = 1; | ||
1442 | swizzle_pipe[2] = 2; | ||
1443 | swizzle_pipe[3] = 3; | ||
1444 | swizzle_pipe[4] = 4; | ||
1445 | break; | ||
1446 | case 6: | ||
1447 | swizzle_pipe[0] = 0; | ||
1448 | swizzle_pipe[1] = 2; | ||
1449 | swizzle_pipe[2] = 4; | ||
1450 | swizzle_pipe[3] = 5; | ||
1451 | swizzle_pipe[4] = 1; | ||
1452 | swizzle_pipe[5] = 3; | ||
1453 | break; | ||
1454 | case 7: | ||
1455 | swizzle_pipe[0] = 0; | ||
1456 | swizzle_pipe[1] = 2; | ||
1457 | swizzle_pipe[2] = 4; | ||
1458 | swizzle_pipe[3] = 6; | ||
1459 | swizzle_pipe[4] = 1; | ||
1460 | swizzle_pipe[5] = 3; | ||
1461 | swizzle_pipe[6] = 5; | ||
1462 | break; | ||
1463 | case 8: | ||
1464 | swizzle_pipe[0] = 0; | ||
1465 | swizzle_pipe[1] = 2; | ||
1466 | swizzle_pipe[2] = 4; | ||
1467 | swizzle_pipe[3] = 6; | ||
1468 | swizzle_pipe[4] = 1; | ||
1469 | swizzle_pipe[5] = 3; | ||
1470 | swizzle_pipe[6] = 5; | ||
1471 | swizzle_pipe[7] = 7; | ||
1472 | break; | ||
1473 | } | 1406 | } |
1474 | 1407 | ||
1475 | cur_backend = 0; | 1408 | for (i = 0; i < max_rb_num; i++) { |
1476 | for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { | 1409 | if (!(mask & disabled_rb_mask)) { |
1477 | while (((1 << cur_backend) & enabled_backends_mask) == 0) | 1410 | for (j = 0; j < pipe_rb_ratio; j++) { |
1478 | cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; | 1411 | data <<= rb_num_width; |
1479 | 1412 | data |= max_rb_num - i - 1; | |
1480 | backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); | 1413 | } |
1481 | 1414 | if (pipe_rb_remain) { | |
1482 | cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; | 1415 | data <<= rb_num_width; |
1416 | data |= max_rb_num - i - 1; | ||
1417 | pipe_rb_remain--; | ||
1418 | } | ||
1419 | } | ||
1420 | mask >>= 1; | ||
1483 | } | 1421 | } |
1484 | 1422 | ||
1485 | return backend_map; | 1423 | return data; |
1486 | } | 1424 | } |
1487 | 1425 | ||
1488 | int r600_count_pipe_bits(uint32_t val) | 1426 | int r600_count_pipe_bits(uint32_t val) |
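
To see what r6xx_remap_render_backend() produces, here is a small userspace re-implementation of its packing loop with assumed inputs (four pipes, four possible RBs, RB1 fused off, 2-bit fields as on r6xx/r7xx); it is an illustration of the algorithm above, not the driver code itself.

    /* Standalone sketch of the backend-remap packing with assumed example inputs. */
    #include <stdio.h>

    static int count_bits(unsigned v)
    {
        int n = 0;
        for (; v; v >>= 1)
            n += v & 1;
        return n;
    }

    int main(void)
    {
        unsigned tiling_pipe_num = 2;     /* log2(pipes): four pipes */
        unsigned max_rb_num = 4;          /* RBs this asic could have */
        unsigned total_max_rb_num = 8;    /* R6XX_MAX_BACKENDS */
        unsigned disabled_rb_mask = 0x2;  /* pretend RB1 is fused off */
        unsigned rb_num_width = 2;        /* 2 bits per pipe on r6xx/r7xx */
        unsigned data = 0, mask = 1u << (max_rb_num - 1);
        unsigned rendering_pipe_num, req_rb_num, ratio, remain, i, j;

        disabled_rb_mask |= (0xffu << max_rb_num) & 0xff;  /* RBs that don't exist */
        rendering_pipe_num = 1u << tiling_pipe_num;
        req_rb_num = total_max_rb_num - count_bits(disabled_rb_mask);
        ratio = rendering_pipe_num / req_rb_num;
        remain = rendering_pipe_num - ratio * req_rb_num;

        for (i = 0; i < max_rb_num; i++) {
            if (!(mask & disabled_rb_mask)) {
                for (j = 0; j < ratio; j++) {
                    data <<= rb_num_width;
                    data |= max_rb_num - i - 1;
                }
                if (remain) {
                    data <<= rb_num_width;
                    data |= max_rb_num - i - 1;
                    remain--;
                }
            }
            mask >>= 1;
        }
        printf("backend map = 0x%x\n", data);  /* prints 0xf8 for these inputs */
        return 0;
    }

For these inputs it prints 0xf8, i.e. the four 2-bit fields come out as 3, 3, 2, 0: the three remaining backends are spread across the four pipes, with RB3 covering two of them.
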
@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
1500 | { | 1438 | { |
1501 | u32 tiling_config; | 1439 | u32 tiling_config; |
1502 | u32 ramcfg; | 1440 | u32 ramcfg; |
1503 | u32 backend_map; | ||
1504 | u32 cc_rb_backend_disable; | 1441 | u32 cc_rb_backend_disable; |
1505 | u32 cc_gc_shader_pipe_config; | 1442 | u32 cc_gc_shader_pipe_config; |
1506 | u32 tmp; | 1443 | u32 tmp; |
@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
1511 | u32 sq_thread_resource_mgmt = 0; | 1448 | u32 sq_thread_resource_mgmt = 0; |
1512 | u32 sq_stack_resource_mgmt_1 = 0; | 1449 | u32 sq_stack_resource_mgmt_1 = 0; |
1513 | u32 sq_stack_resource_mgmt_2 = 0; | 1450 | u32 sq_stack_resource_mgmt_2 = 0; |
1451 | u32 disabled_rb_mask; | ||
1514 | 1452 | ||
1515 | /* FIXME: implement */ | 1453 | rdev->config.r600.tiling_group_size = 256; |
1516 | switch (rdev->family) { | 1454 | switch (rdev->family) { |
1517 | case CHIP_R600: | 1455 | case CHIP_R600: |
1518 | rdev->config.r600.max_pipes = 4; | 1456 | rdev->config.r600.max_pipes = 4; |
@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
1616 | rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); | 1554 | rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); |
1617 | tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); | 1555 | tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); |
1618 | tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); | 1556 | tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); |
1619 | if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) | 1557 | |
1620 | rdev->config.r600.tiling_group_size = 512; | ||
1621 | else | ||
1622 | rdev->config.r600.tiling_group_size = 256; | ||
1623 | tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; | 1558 | tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; |
1624 | if (tmp > 3) { | 1559 | if (tmp > 3) { |
1625 | tiling_config |= ROW_TILING(3); | 1560 | tiling_config |= ROW_TILING(3); |
@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev) | |||
1631 | tiling_config |= BANK_SWAPS(1); | 1566 | tiling_config |= BANK_SWAPS(1); |
1632 | 1567 | ||
1633 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; | 1568 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; |
1634 | cc_rb_backend_disable |= | 1569 | tmp = R6XX_MAX_BACKENDS - |
1635 | BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK); | 1570 | r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK); |
1636 | 1571 | if (tmp < rdev->config.r600.max_backends) { | |
1637 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; | 1572 | rdev->config.r600.max_backends = tmp; |
1638 | cc_gc_shader_pipe_config |= | 1573 | } |
1639 | INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); | 1574 | |
1640 | cc_gc_shader_pipe_config |= | 1575 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; |
1641 | INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); | 1576 | tmp = R6XX_MAX_PIPES - |
1642 | 1577 | r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK); | |
1643 | backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, | 1578 | if (tmp < rdev->config.r600.max_pipes) { |
1644 | (R6XX_MAX_BACKENDS - | 1579 | rdev->config.r600.max_pipes = tmp; |
1645 | r600_count_pipe_bits((cc_rb_backend_disable & | 1580 | } |
1646 | R6XX_MAX_BACKENDS_MASK) >> 16)), | 1581 | tmp = R6XX_MAX_SIMDS - |
1647 | (cc_rb_backend_disable >> 16)); | 1582 | r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); |
1583 | if (tmp < rdev->config.r600.max_simds) { | ||
1584 | rdev->config.r600.max_simds = tmp; | ||
1585 | } | ||
1586 | |||
1587 | disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; | ||
1588 | tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; | ||
1589 | tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends, | ||
1590 | R6XX_MAX_BACKENDS, disabled_rb_mask); | ||
1591 | tiling_config |= tmp << 16; | ||
1592 | rdev->config.r600.backend_map = tmp; | ||
1593 | |||
1648 | rdev->config.r600.tile_config = tiling_config; | 1594 | rdev->config.r600.tile_config = tiling_config; |
1649 | rdev->config.r600.backend_map = backend_map; | ||
1650 | tiling_config |= BACKEND_MAP(backend_map); | ||
1651 | WREG32(GB_TILING_CONFIG, tiling_config); | 1595 | WREG32(GB_TILING_CONFIG, tiling_config); |
1652 | WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); | 1596 | WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); |
1653 | WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); | 1597 | WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); |
1654 | 1598 | ||
1655 | /* Setup pipes */ | ||
1656 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
1657 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
1658 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
1659 | |||
1660 | tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); | 1599 | tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
1661 | WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); | 1600 | WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); |
1662 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); | 1601 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); |
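
Taken together, r600_gpu_init() no longer builds a backend map from a fixed swizzle table and then masks off backends and pipes by writing BACKEND_DISABLE / INACTIVE_* values; it reads the fused-off render backends, pipes and SIMDs back from CC_RB_BACKEND_DISABLE and CC_GC_SHADER_PIPE_CONFIG, clamps the per-asic maxima to match, and feeds the result through r6xx_remap_render_backend() into the backend-map field of GB_TILING_CONFIG. The tiling group size is also fixed at 256 instead of being derived from BURSTLENGTH.
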
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 15bd3b216243..a0dbf1fe6a40 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -219,6 +219,8 @@ | |||
219 | #define BACKEND_MAP(x) ((x) << 16) | 219 | #define BACKEND_MAP(x) ((x) << 16) |
220 | 220 | ||
221 | #define GB_TILING_CONFIG 0x98F0 | 221 | #define GB_TILING_CONFIG 0x98F0 |
222 | #define PIPE_TILING__SHIFT 1 | ||
223 | #define PIPE_TILING__MASK 0x0000000e | ||
222 | 224 | ||
223 | #define GC_USER_SHADER_PIPE_CONFIG 0x8954 | 225 | #define GC_USER_SHADER_PIPE_CONFIG 0x8954 |
224 | #define INACTIVE_QD_PIPES(x) ((x) << 8) | 226 | #define INACTIVE_QD_PIPES(x) ((x) << 8) |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2e24022b389a..85dac33e3cce 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1848,6 +1848,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock); | |||
1848 | extern void r600_hdmi_enable(struct drm_encoder *encoder); | 1848 | extern void r600_hdmi_enable(struct drm_encoder *encoder); |
1849 | extern void r600_hdmi_disable(struct drm_encoder *encoder); | 1849 | extern void r600_hdmi_disable(struct drm_encoder *encoder); |
1850 | extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); | 1850 | extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
1851 | extern u32 r6xx_remap_render_backend(struct radeon_device *rdev, | ||
1852 | u32 tiling_pipe_num, | ||
1853 | u32 max_rb_num, | ||
1854 | u32 total_max_rb_num, | ||
1855 | u32 enabled_rb_mask); | ||
1851 | 1856 | ||
1852 | /* | 1857 | /* |
1853 | * evergreen functions used by radeon_encoder.c | 1858 | * evergreen functions used by radeon_encoder.c |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 0137689ed461..142f89462aa4 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p) | |||
147 | sync_to_ring, p->ring); | 147 | sync_to_ring, p->ring); |
148 | } | 148 | } |
149 | 149 | ||
150 | /* XXX: note that this is called from the legacy UMS CS ioctl as well */ | ||
150 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | 151 | int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) |
151 | { | 152 | { |
152 | struct drm_radeon_cs *cs = data; | 153 | struct drm_radeon_cs *cs = data; |
@@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) | |||
245 | } | 246 | } |
246 | } | 247 | } |
247 | 248 | ||
248 | if ((p->cs_flags & RADEON_CS_USE_VM) && | 249 | /* these are KMS only */ |
249 | !p->rdev->vm_manager.enabled) { | 250 | if (p->rdev) { |
250 | DRM_ERROR("VM not active on asic!\n"); | 251 | if ((p->cs_flags & RADEON_CS_USE_VM) && |
251 | return -EINVAL; | 252 | !p->rdev->vm_manager.enabled) { |
252 | } | 253 | DRM_ERROR("VM not active on asic!\n"); |
253 | 254 | return -EINVAL; | |
254 | /* we only support VM on SI+ */ | 255 | } |
255 | if ((p->rdev->family >= CHIP_TAHITI) && | ||
256 | ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { | ||
257 | DRM_ERROR("VM required on SI+!\n"); | ||
258 | return -EINVAL; | ||
259 | } | ||
260 | 256 | ||
261 | if (radeon_cs_get_ring(p, ring, priority)) | 257 | /* we only support VM on SI+ */ |
262 | return -EINVAL; | 258 | if ((p->rdev->family >= CHIP_TAHITI) && |
259 | ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { | ||
260 | DRM_ERROR("VM required on SI+!\n"); | ||
261 | return -EINVAL; | ||
262 | } | ||
263 | 263 | ||
264 | if (radeon_cs_get_ring(p, ring, priority)) | ||
265 | return -EINVAL; | ||
266 | } | ||
264 | 267 | ||
265 | /* deal with non-vm */ | 268 | /* deal with non-vm */ |
266 | if ((p->chunk_ib_idx != -1) && | 269 | if ((p->chunk_ib_idx != -1) && |
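
The VM checks and the ring lookup are now wrapped in if (p->rdev): as the added comment notes, radeon_cs_parser_init() is also reached from the legacy UMS CS ioctl, where there is no KMS radeon_device to dereference, so the KMS-only validation has to be skipped on that path.
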
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index c2f473bc13b8..04ddc365a908 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) | |||
151 | WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); | 151 | WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); |
152 | WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); | 152 | WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); |
153 | WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); | 153 | WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); |
154 | if (rdev->family == CHIP_RV740) | ||
155 | WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp); | ||
154 | WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); | 156 | WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); |
155 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); | 157 | WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); |
156 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); | 158 | WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); |
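
RV740 evidently has a fourth media-domain L1 TLB: the new MC_VM_MD_L1_TLB3_CNTL register (added to rv770d.h further down) now receives the same control word as TLB0-2 when the GART is enabled on that chip.
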
@@ -363,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev) | |||
363 | /* | 365 | /* |
364 | * Core functions | 366 | * Core functions |
365 | */ | 367 | */ |
366 | static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | ||
367 | u32 num_tile_pipes, | ||
368 | u32 num_backends, | ||
369 | u32 backend_disable_mask) | ||
370 | { | ||
371 | u32 backend_map = 0; | ||
372 | u32 enabled_backends_mask; | ||
373 | u32 enabled_backends_count; | ||
374 | u32 cur_pipe; | ||
375 | u32 swizzle_pipe[R7XX_MAX_PIPES]; | ||
376 | u32 cur_backend; | ||
377 | u32 i; | ||
378 | bool force_no_swizzle; | ||
379 | |||
380 | if (num_tile_pipes > R7XX_MAX_PIPES) | ||
381 | num_tile_pipes = R7XX_MAX_PIPES; | ||
382 | if (num_tile_pipes < 1) | ||
383 | num_tile_pipes = 1; | ||
384 | if (num_backends > R7XX_MAX_BACKENDS) | ||
385 | num_backends = R7XX_MAX_BACKENDS; | ||
386 | if (num_backends < 1) | ||
387 | num_backends = 1; | ||
388 | |||
389 | enabled_backends_mask = 0; | ||
390 | enabled_backends_count = 0; | ||
391 | for (i = 0; i < R7XX_MAX_BACKENDS; ++i) { | ||
392 | if (((backend_disable_mask >> i) & 1) == 0) { | ||
393 | enabled_backends_mask |= (1 << i); | ||
394 | ++enabled_backends_count; | ||
395 | } | ||
396 | if (enabled_backends_count == num_backends) | ||
397 | break; | ||
398 | } | ||
399 | |||
400 | if (enabled_backends_count == 0) { | ||
401 | enabled_backends_mask = 1; | ||
402 | enabled_backends_count = 1; | ||
403 | } | ||
404 | |||
405 | if (enabled_backends_count != num_backends) | ||
406 | num_backends = enabled_backends_count; | ||
407 | |||
408 | switch (rdev->family) { | ||
409 | case CHIP_RV770: | ||
410 | case CHIP_RV730: | ||
411 | force_no_swizzle = false; | ||
412 | break; | ||
413 | case CHIP_RV710: | ||
414 | case CHIP_RV740: | ||
415 | default: | ||
416 | force_no_swizzle = true; | ||
417 | break; | ||
418 | } | ||
419 | |||
420 | memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); | ||
421 | switch (num_tile_pipes) { | ||
422 | case 1: | ||
423 | swizzle_pipe[0] = 0; | ||
424 | break; | ||
425 | case 2: | ||
426 | swizzle_pipe[0] = 0; | ||
427 | swizzle_pipe[1] = 1; | ||
428 | break; | ||
429 | case 3: | ||
430 | if (force_no_swizzle) { | ||
431 | swizzle_pipe[0] = 0; | ||
432 | swizzle_pipe[1] = 1; | ||
433 | swizzle_pipe[2] = 2; | ||
434 | } else { | ||
435 | swizzle_pipe[0] = 0; | ||
436 | swizzle_pipe[1] = 2; | ||
437 | swizzle_pipe[2] = 1; | ||
438 | } | ||
439 | break; | ||
440 | case 4: | ||
441 | if (force_no_swizzle) { | ||
442 | swizzle_pipe[0] = 0; | ||
443 | swizzle_pipe[1] = 1; | ||
444 | swizzle_pipe[2] = 2; | ||
445 | swizzle_pipe[3] = 3; | ||
446 | } else { | ||
447 | swizzle_pipe[0] = 0; | ||
448 | swizzle_pipe[1] = 2; | ||
449 | swizzle_pipe[2] = 3; | ||
450 | swizzle_pipe[3] = 1; | ||
451 | } | ||
452 | break; | ||
453 | case 5: | ||
454 | if (force_no_swizzle) { | ||
455 | swizzle_pipe[0] = 0; | ||
456 | swizzle_pipe[1] = 1; | ||
457 | swizzle_pipe[2] = 2; | ||
458 | swizzle_pipe[3] = 3; | ||
459 | swizzle_pipe[4] = 4; | ||
460 | } else { | ||
461 | swizzle_pipe[0] = 0; | ||
462 | swizzle_pipe[1] = 2; | ||
463 | swizzle_pipe[2] = 4; | ||
464 | swizzle_pipe[3] = 1; | ||
465 | swizzle_pipe[4] = 3; | ||
466 | } | ||
467 | break; | ||
468 | case 6: | ||
469 | if (force_no_swizzle) { | ||
470 | swizzle_pipe[0] = 0; | ||
471 | swizzle_pipe[1] = 1; | ||
472 | swizzle_pipe[2] = 2; | ||
473 | swizzle_pipe[3] = 3; | ||
474 | swizzle_pipe[4] = 4; | ||
475 | swizzle_pipe[5] = 5; | ||
476 | } else { | ||
477 | swizzle_pipe[0] = 0; | ||
478 | swizzle_pipe[1] = 2; | ||
479 | swizzle_pipe[2] = 4; | ||
480 | swizzle_pipe[3] = 5; | ||
481 | swizzle_pipe[4] = 3; | ||
482 | swizzle_pipe[5] = 1; | ||
483 | } | ||
484 | break; | ||
485 | case 7: | ||
486 | if (force_no_swizzle) { | ||
487 | swizzle_pipe[0] = 0; | ||
488 | swizzle_pipe[1] = 1; | ||
489 | swizzle_pipe[2] = 2; | ||
490 | swizzle_pipe[3] = 3; | ||
491 | swizzle_pipe[4] = 4; | ||
492 | swizzle_pipe[5] = 5; | ||
493 | swizzle_pipe[6] = 6; | ||
494 | } else { | ||
495 | swizzle_pipe[0] = 0; | ||
496 | swizzle_pipe[1] = 2; | ||
497 | swizzle_pipe[2] = 4; | ||
498 | swizzle_pipe[3] = 6; | ||
499 | swizzle_pipe[4] = 3; | ||
500 | swizzle_pipe[5] = 1; | ||
501 | swizzle_pipe[6] = 5; | ||
502 | } | ||
503 | break; | ||
504 | case 8: | ||
505 | if (force_no_swizzle) { | ||
506 | swizzle_pipe[0] = 0; | ||
507 | swizzle_pipe[1] = 1; | ||
508 | swizzle_pipe[2] = 2; | ||
509 | swizzle_pipe[3] = 3; | ||
510 | swizzle_pipe[4] = 4; | ||
511 | swizzle_pipe[5] = 5; | ||
512 | swizzle_pipe[6] = 6; | ||
513 | swizzle_pipe[7] = 7; | ||
514 | } else { | ||
515 | swizzle_pipe[0] = 0; | ||
516 | swizzle_pipe[1] = 2; | ||
517 | swizzle_pipe[2] = 4; | ||
518 | swizzle_pipe[3] = 6; | ||
519 | swizzle_pipe[4] = 3; | ||
520 | swizzle_pipe[5] = 1; | ||
521 | swizzle_pipe[6] = 7; | ||
522 | swizzle_pipe[7] = 5; | ||
523 | } | ||
524 | break; | ||
525 | } | ||
526 | |||
527 | cur_backend = 0; | ||
528 | for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { | ||
529 | while (((1 << cur_backend) & enabled_backends_mask) == 0) | ||
530 | cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS; | ||
531 | |||
532 | backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); | ||
533 | |||
534 | cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS; | ||
535 | } | ||
536 | |||
537 | return backend_map; | ||
538 | } | ||
539 | |||
540 | static void rv770_gpu_init(struct radeon_device *rdev) | 368 | static void rv770_gpu_init(struct radeon_device *rdev) |
541 | { | 369 | { |
542 | int i, j, num_qd_pipes; | 370 | int i, j, num_qd_pipes; |
@@ -552,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
552 | u32 sq_thread_resource_mgmt; | 380 | u32 sq_thread_resource_mgmt; |
553 | u32 hdp_host_path_cntl; | 381 | u32 hdp_host_path_cntl; |
554 | u32 sq_dyn_gpr_size_simd_ab_0; | 382 | u32 sq_dyn_gpr_size_simd_ab_0; |
555 | u32 backend_map; | ||
556 | u32 gb_tiling_config = 0; | 383 | u32 gb_tiling_config = 0; |
557 | u32 cc_rb_backend_disable = 0; | 384 | u32 cc_rb_backend_disable = 0; |
558 | u32 cc_gc_shader_pipe_config = 0; | 385 | u32 cc_gc_shader_pipe_config = 0; |
559 | u32 mc_arb_ramcfg; | 386 | u32 mc_arb_ramcfg; |
560 | u32 db_debug4; | 387 | u32 db_debug4, tmp; |
388 | u32 inactive_pipes, shader_pipe_config; | ||
389 | u32 disabled_rb_mask; | ||
390 | unsigned active_number; | ||
561 | 391 | ||
562 | /* setup chip specs */ | 392 | /* setup chip specs */ |
393 | rdev->config.rv770.tiling_group_size = 256; | ||
563 | switch (rdev->family) { | 394 | switch (rdev->family) { |
564 | case CHIP_RV770: | 395 | case CHIP_RV770: |
565 | rdev->config.rv770.max_pipes = 4; | 396 | rdev->config.rv770.max_pipes = 4; |
@@ -670,33 +501,70 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
670 | /* setup tiling, simd, pipe config */ | 501 | /* setup tiling, simd, pipe config */ |
671 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); | 502 | mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); |
672 | 503 | ||
504 | shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); | ||
505 | inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT; | ||
506 | for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) { | ||
507 | if (!(inactive_pipes & tmp)) { | ||
508 | active_number++; | ||
509 | } | ||
510 | tmp <<= 1; | ||
511 | } | ||
512 | if (active_number == 1) { | ||
513 | WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1); | ||
514 | } else { | ||
515 | WREG32(SPI_CONFIG_CNTL, 0); | ||
516 | } | ||
517 | |||
518 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; | ||
519 | tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16); | ||
520 | if (tmp < rdev->config.rv770.max_backends) { | ||
521 | rdev->config.rv770.max_backends = tmp; | ||
522 | } | ||
523 | |||
524 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; | ||
525 | tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK); | ||
526 | if (tmp < rdev->config.rv770.max_pipes) { | ||
527 | rdev->config.rv770.max_pipes = tmp; | ||
528 | } | ||
529 | tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK); | ||
530 | if (tmp < rdev->config.rv770.max_simds) { | ||
531 | rdev->config.rv770.max_simds = tmp; | ||
532 | } | ||
533 | |||
673 | switch (rdev->config.rv770.max_tile_pipes) { | 534 | switch (rdev->config.rv770.max_tile_pipes) { |
674 | case 1: | 535 | case 1: |
675 | default: | 536 | default: |
676 | gb_tiling_config |= PIPE_TILING(0); | 537 | gb_tiling_config = PIPE_TILING(0); |
677 | break; | 538 | break; |
678 | case 2: | 539 | case 2: |
679 | gb_tiling_config |= PIPE_TILING(1); | 540 | gb_tiling_config = PIPE_TILING(1); |
680 | break; | 541 | break; |
681 | case 4: | 542 | case 4: |
682 | gb_tiling_config |= PIPE_TILING(2); | 543 | gb_tiling_config = PIPE_TILING(2); |
683 | break; | 544 | break; |
684 | case 8: | 545 | case 8: |
685 | gb_tiling_config |= PIPE_TILING(3); | 546 | gb_tiling_config = PIPE_TILING(3); |
686 | break; | 547 | break; |
687 | } | 548 | } |
688 | rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; | 549 | rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; |
689 | 550 | ||
551 | disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK; | ||
552 | tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT; | ||
553 | tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends, | ||
554 | R7XX_MAX_BACKENDS, disabled_rb_mask); | ||
555 | gb_tiling_config |= tmp << 16; | ||
556 | rdev->config.rv770.backend_map = tmp; | ||
557 | |||
690 | if (rdev->family == CHIP_RV770) | 558 | if (rdev->family == CHIP_RV770) |
691 | gb_tiling_config |= BANK_TILING(1); | 559 | gb_tiling_config |= BANK_TILING(1); |
692 | else | 560 | else { |
693 | gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); | 561 | if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) |
562 | gb_tiling_config |= BANK_TILING(1); | ||
563 | else | ||
564 | gb_tiling_config |= BANK_TILING(0); | ||
565 | } | ||
694 | rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); | 566 | rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); |
695 | gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); | 567 | gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); |
696 | if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) | ||
697 | rdev->config.rv770.tiling_group_size = 512; | ||
698 | else | ||
699 | rdev->config.rv770.tiling_group_size = 256; | ||
700 | if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { | 568 | if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { |
701 | gb_tiling_config |= ROW_TILING(3); | 569 | gb_tiling_config |= ROW_TILING(3); |
702 | gb_tiling_config |= SAMPLE_SPLIT(3); | 570 | gb_tiling_config |= SAMPLE_SPLIT(3); |
@@ -708,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
708 | } | 576 | } |
709 | 577 | ||
710 | gb_tiling_config |= BANK_SWAPS(1); | 578 | gb_tiling_config |= BANK_SWAPS(1); |
711 | |||
712 | cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; | ||
713 | cc_rb_backend_disable |= | ||
714 | BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK); | ||
715 | |||
716 | cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; | ||
717 | cc_gc_shader_pipe_config |= | ||
718 | INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK); | ||
719 | cc_gc_shader_pipe_config |= | ||
720 | INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK); | ||
721 | |||
722 | if (rdev->family == CHIP_RV740) | ||
723 | backend_map = 0x28; | ||
724 | else | ||
725 | backend_map = r700_get_tile_pipe_to_backend_map(rdev, | ||
726 | rdev->config.rv770.max_tile_pipes, | ||
727 | (R7XX_MAX_BACKENDS - | ||
728 | r600_count_pipe_bits((cc_rb_backend_disable & | ||
729 | R7XX_MAX_BACKENDS_MASK) >> 16)), | ||
730 | (cc_rb_backend_disable >> 16)); | ||
731 | |||
732 | rdev->config.rv770.tile_config = gb_tiling_config; | 579 | rdev->config.rv770.tile_config = gb_tiling_config; |
733 | rdev->config.rv770.backend_map = backend_map; | ||
734 | gb_tiling_config |= BACKEND_MAP(backend_map); | ||
735 | 580 | ||
736 | WREG32(GB_TILING_CONFIG, gb_tiling_config); | 581 | WREG32(GB_TILING_CONFIG, gb_tiling_config); |
737 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 582 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
738 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 583 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
739 | 584 | ||
740 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
741 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
742 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | ||
743 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | ||
744 | |||
745 | WREG32(CGTS_SYS_TCC_DISABLE, 0); | 585 | WREG32(CGTS_SYS_TCC_DISABLE, 0); |
746 | WREG32(CGTS_TCC_DISABLE, 0); | 586 | WREG32(CGTS_TCC_DISABLE, 0); |
747 | WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); | 587 | WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); |
748 | WREG32(CGTS_USER_TCC_DISABLE, 0); | 588 | WREG32(CGTS_USER_TCC_DISABLE, 0); |
749 | 589 | ||
750 | num_qd_pipes = | 590 | |
751 | R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); | 591 | num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); |
752 | WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); | 592 | WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); |
753 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); | 593 | WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); |
754 | 594 | ||
@@ -809,8 +649,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
809 | 649 | ||
810 | WREG32(VGT_NUM_INSTANCES, 1); | 650 | WREG32(VGT_NUM_INSTANCES, 1); |
811 | 651 | ||
812 | WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0)); | ||
813 | |||
814 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); | 652 | WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); |
815 | 653 | ||
816 | WREG32(CP_PERFMON_CNTL, 0); | 654 | WREG32(CP_PERFMON_CNTL, 0); |
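
rv770_gpu_init() gets the same treatment as the r600 path: active QD pipes are counted from CC_GC_SHADER_PIPE_CONFIG (with SPI_CONFIG_CNTL set to DISABLE_INTERP_1 when only one pipe is active), max_backends/max_pipes/max_simds are clamped to what the hardware reports, the hardcoded backend_map = 0x28 for RV740 is replaced by a map computed with r6xx_remap_render_backend(), and tiling_group_size is fixed at 256.
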
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 9c549f702f2f..fdc089896011 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h | |||
@@ -106,10 +106,13 @@ | |||
106 | #define BACKEND_MAP(x) ((x) << 16) | 106 | #define BACKEND_MAP(x) ((x) << 16) |
107 | 107 | ||
108 | #define GB_TILING_CONFIG 0x98F0 | 108 | #define GB_TILING_CONFIG 0x98F0 |
109 | #define PIPE_TILING__SHIFT 1 | ||
110 | #define PIPE_TILING__MASK 0x0000000e | ||
109 | 111 | ||
110 | #define GC_USER_SHADER_PIPE_CONFIG 0x8954 | 112 | #define GC_USER_SHADER_PIPE_CONFIG 0x8954 |
111 | #define INACTIVE_QD_PIPES(x) ((x) << 8) | 113 | #define INACTIVE_QD_PIPES(x) ((x) << 8) |
112 | #define INACTIVE_QD_PIPES_MASK 0x0000FF00 | 114 | #define INACTIVE_QD_PIPES_MASK 0x0000FF00 |
115 | #define INACTIVE_QD_PIPES_SHIFT 8 | ||
113 | #define INACTIVE_SIMDS(x) ((x) << 16) | 116 | #define INACTIVE_SIMDS(x) ((x) << 16) |
114 | #define INACTIVE_SIMDS_MASK 0x00FF0000 | 117 | #define INACTIVE_SIMDS_MASK 0x00FF0000 |
115 | 118 | ||
@@ -174,6 +177,7 @@ | |||
174 | #define MC_VM_MD_L1_TLB0_CNTL 0x2654 | 177 | #define MC_VM_MD_L1_TLB0_CNTL 0x2654 |
175 | #define MC_VM_MD_L1_TLB1_CNTL 0x2658 | 178 | #define MC_VM_MD_L1_TLB1_CNTL 0x2658 |
176 | #define MC_VM_MD_L1_TLB2_CNTL 0x265C | 179 | #define MC_VM_MD_L1_TLB2_CNTL 0x265C |
180 | #define MC_VM_MD_L1_TLB3_CNTL 0x2698 | ||
177 | #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C | 181 | #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C |
178 | #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 | 182 | #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 |
179 | #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 | 183 | #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 |
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 36792bd4da77..b67cfcaa661f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c | |||
@@ -1834,6 +1834,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) | |||
1834 | spin_unlock(&glob->lru_lock); | 1834 | spin_unlock(&glob->lru_lock); |
1835 | (void) ttm_bo_cleanup_refs(bo, false, false, false); | 1835 | (void) ttm_bo_cleanup_refs(bo, false, false, false); |
1836 | kref_put(&bo->list_kref, ttm_bo_release_list); | 1836 | kref_put(&bo->list_kref, ttm_bo_release_list); |
1837 | spin_lock(&glob->lru_lock); | ||
1837 | continue; | 1838 | continue; |
1838 | } | 1839 | } |
1839 | 1840 | ||
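
ttm_bo_swapout() runs its loop with glob->lru_lock held and drops the lock around ttm_bo_cleanup_refs(); before this fix the continue path never re-took it, leaving the next iteration and the final unlock unbalanced. A hedged userspace analogue of the pattern, using a pthread mutex in place of the spinlock (invented example, not ttm code):

    /* Drop a lock around blocking work, then re-take it before continuing the loop. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    int main(void)
    {
        int i;

        pthread_mutex_lock(&lock);              /* loop runs with the lock held */
        for (i = 0; i < 4; i++) {
            if (i == 2) {
                pthread_mutex_unlock(&lock);    /* drop it around blocking work */
                printf("blocking cleanup for item %d\n", i);
                pthread_mutex_lock(&lock);      /* the fix: re-take before continue */
                continue;
            }
            printf("item %d handled under the lock\n", i);
        }
        pthread_mutex_unlock(&lock);            /* balanced: exactly one final unlock */
        return 0;
    }
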
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 51c9ba5cd2fb..21ee78226560 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | |||
@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv, | |||
66 | cmd += sizeof(remap_cmd) / sizeof(uint32); | 66 | cmd += sizeof(remap_cmd) / sizeof(uint32); |
67 | 67 | ||
68 | for (i = 0; i < num_pages; ++i) { | 68 | for (i = 0; i < num_pages; ++i) { |
69 | if (VMW_PPN_SIZE > 4) | 69 | if (VMW_PPN_SIZE <= 4) |
70 | *cmd = page_to_pfn(*pages++); | 70 | *cmd = page_to_pfn(*pages++); |
71 | else | 71 | else |
72 | *((uint64_t *)cmd) = page_to_pfn(*pages++); | 72 | *((uint64_t *)cmd) = page_to_pfn(*pages++); |
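
The vmwgfx loop writes one page-frame number per page, as a 32-bit slot when VMW_PPN_SIZE is 4 bytes or less and as a 64-bit slot otherwise. With the comparison written as > 4, the 4-byte case fell through to the 64-bit store, writing twice the intended width for every page, which could run past the space reserved for the command; inverting the test makes the store width match VMW_PPN_SIZE again.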