author		Dave Airlie <airlied@redhat.com>	2016-07-07 23:42:41 -0400
committer	Dave Airlie <airlied@redhat.com>	2016-07-07 23:42:41 -0400
commit		6f6e68b383314ab10189f983fead55437c149f32 (patch)
tree		a9254f00bb2053aa567981c8979c9713d648346c /drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
parent		b33e07731c13f2a9ec5c345b8542cae5adf74235 (diff)
parent		b1814a1def0564a2a1d3be7fa5bf7243ff899a28 (diff)
Merge branch 'drm-next-4.8' of git://people.freedesktop.org/~agd5f/linux into drm-next
This is the main 4.8 pull for radeon and amdgpu. Sorry for the delay, I meant to send this out last week, but I was moving house. Lots of changes here:
- ATPX improvements for better dGPU power control on PX systems
- New power features for CZ/BR/ST
- Pipelined BO moves and evictions in TTM
- GPU scheduler improvements
- GPU reset improvements
- Overclocking on dGPUs with amdgpu
- Lots of code cleanup
- Bug fixes

* 'drm-next-4.8' of git://people.freedesktop.org/~agd5f/linux: (191 commits)
  drm/amd/powerplay: don't add invalid voltage.
  drm/amdgpu: add read/write function for GC CAC programming
  drm/amd/powerplay: add definitions related to di/dt feature for fiji and polaris.
  drm/amd/powerplay: add shared definitions for di/dt feature.
  drm/amdgpu: remove gfx8 registers that vary between asics
  drm/amd/powerplay: add mvdd dpm support.
  drm/amdgpu: get number of shade engine by cgs interface.
  drm/amdgpu: remove more of the ring backup code
  drm/amd/powerplay: Unify family defines
  drm/amdgpu: clean up ring_backup code, no need more
  drm/amdgpu: ib test first after gpu reset
  drm/amdgpu: recovery hw jobs when gpu reset V3
  drm/amdgpu: abstract amdgpu_vm_is_gpu_reset
  drm/amdgpu: add a bool to specify if needing vm flush V2
  drm/amdgpu: add amd_sched_job_recovery
  drm/amdgpu: force completion for gpu reset
  drm/amdgpu: block ttm first before parking scheduler
  drm/amd: add amd_sched_hw_job_reset
  drm/amd: add parent for sched fence
  drm/amdgpu: remove evict vram
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_device.c	270
1 file changed, 218 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6e920086af46..9c9f28c1ce84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
  * Alex Deucher
  * Jerome Glisse
  */
+#include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
@@ -35,6 +36,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 #include "amdgpu_i2c.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
@@ -79,24 +81,27 @@ bool amdgpu_device_is_px(struct drm_device *dev)
 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect)
 {
+	uint32_t ret;
+
 	if ((reg * 4) < adev->rmmio_size && !always_indirect)
-		return readl(((void __iomem *)adev->rmmio) + (reg * 4));
+		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
 		unsigned long flags;
-		uint32_t ret;
 
 		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
-		return ret;
 	}
+	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+	return ret;
 }
 
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect)
 {
+	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
 	if ((reg * 4) < adev->rmmio_size && !always_indirect)
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
@@ -1070,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
 		if (adev->ip_blocks[i].type == block_type) {
 			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
									    state);
 			if (r)
 				return r;
+			break;
 		}
 	}
 	return r;
@@ -1087,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
 		if (adev->ip_blocks[i].type == block_type) {
 			r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
									    state);
 			if (r)
 				return r;
+			break;
 		}
 	}
 	return r;
 }
 
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+			 enum amd_ip_block_type block_type)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_blocks[i].type == block_type) {
+			r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+			if (r)
+				return r;
+			break;
+		}
+	}
+	return 0;
+
+}
+
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+		    enum amd_ip_block_type block_type)
+{
+	int i;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_blocks[i].type == block_type)
+			return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+	}
+	return true;
+
+}
+
 const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type)
@@ -1209,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		}
 	}
 
+	adev->cg_flags &= amdgpu_cg_mask;
+	adev->pg_flags &= amdgpu_pg_mask;
+
 	return 0;
 }
 
@@ -1440,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
 	adev->didt_rreg = &amdgpu_invalid_rreg;
 	adev->didt_wreg = &amdgpu_invalid_wreg;
+	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
+	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
 
+
 	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
 		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
 		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -1467,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	spin_lock_init(&adev->pcie_idx_lock);
 	spin_lock_init(&adev->uvd_ctx_idx_lock);
 	spin_lock_init(&adev->didt_idx_lock);
+	spin_lock_init(&adev->gc_cac_idx_lock);
 	spin_lock_init(&adev->audio_endpt_idx_lock);
 
 	adev->rmmio_base = pci_resource_start(adev->pdev, 5);
@@ -1511,17 +1563,20 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
 
 	/* Read BIOS */
-	if (!amdgpu_get_bios(adev))
-		return -EINVAL;
+	if (!amdgpu_get_bios(adev)) {
+		r = -EINVAL;
+		goto failed;
+	}
 	/* Must be an ATOMBIOS */
 	if (!adev->is_atom_bios) {
 		dev_err(adev->dev, "Expecting atombios for GPU\n");
-		return -EINVAL;
+		r = -EINVAL;
+		goto failed;
 	}
 	r = amdgpu_atombios_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
-		return r;
+		goto failed;
 	}
 
 	/* See if the asic supports SR-IOV */
@@ -1538,7 +1593,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	    !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
 		if (!adev->bios) {
 			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
-			return -EINVAL;
+			r = -EINVAL;
+			goto failed;
 		}
 		DRM_INFO("GPU not posted. posting now...\n");
 		amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1548,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_atombios_get_clock_info(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
-		return r;
+		goto failed;
 	}
 	/* init i2c buses */
 	amdgpu_atombios_i2c_init(adev);
@@ -1557,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_fence_driver_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
-		return r;
+		goto failed;
 	}
 
 	/* init the mode config */
@@ -1567,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (r) {
 		dev_err(adev->dev, "amdgpu_init failed\n");
 		amdgpu_fini(adev);
-		return r;
+		goto failed;
 	}
 
 	adev->accel_working = true;
@@ -1577,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_ib_pool_init(adev);
 	if (r) {
 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
-		return r;
+		goto failed;
 	}
 
 	r = amdgpu_ib_ring_tests(adev);
@@ -1594,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		DRM_ERROR("registering register debugfs failed (%d).\n", r);
 	}
 
+	r = amdgpu_debugfs_firmware_init(adev);
+	if (r) {
+		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+		return r;
+	}
+
 	if ((amdgpu_testing & 1)) {
 		if (adev->accel_working)
 			amdgpu_test_moves(adev);
@@ -1619,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_late_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_late_init failed\n");
-		return r;
+		goto failed;
 	}
 
 	return 0;
+
+failed:
+	if (runtime)
+		vga_switcheroo_fini_domain_pm_ops(adev->dev);
+	return r;
 }
 
 static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
@@ -1656,6 +1723,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	kfree(adev->bios);
 	adev->bios = NULL;
 	vga_switcheroo_unregister_client(adev->pdev);
+	if (adev->flags & AMD_IS_PX)
+		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 	vga_client_register(adev->pdev, NULL, NULL, NULL);
 	if (adev->rio_mem)
 		pci_iounmap(adev->pdev, adev->rio_mem);
@@ -1861,11 +1930,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
  */
 int amdgpu_gpu_reset(struct amdgpu_device *adev)
 {
-	unsigned ring_sizes[AMDGPU_MAX_RINGS];
-	uint32_t *ring_data[AMDGPU_MAX_RINGS];
-
-	bool saved = false;
-
 	int i, r;
 	int resched;
 
@@ -1874,22 +1938,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-	r = amdgpu_suspend(adev);
-
+	/* block scheduler */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
+
 		if (!ring)
 			continue;
-
-		ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
-		if (ring_sizes[i]) {
-			saved = true;
-			dev_info(adev->dev, "Saved %d dwords of commands "
-				 "on ring %d.\n", ring_sizes[i], i);
-		}
+		kthread_park(ring->sched.thread);
+		amd_sched_hw_job_reset(&ring->sched);
 	}
+	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+	amdgpu_fence_driver_force_completion(adev);
+
+	/* save scratch */
+	amdgpu_atombios_scratch_regs_save(adev);
+	r = amdgpu_suspend(adev);
 
 retry:
+	/* Disable fb access */
+	if (adev->mode_info.num_crtc) {
+		struct amdgpu_mode_mc_save save;
+		amdgpu_display_stop_mc_access(adev, &save);
+		amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+	}
+
 	r = amdgpu_asic_reset(adev);
 	/* post card */
 	amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1898,32 +1970,29 @@ retry:
 		dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
 		r = amdgpu_resume(adev);
 	}
-
+	/* restore scratch */
+	amdgpu_atombios_scratch_regs_restore(adev);
 	if (!r) {
+		r = amdgpu_ib_ring_tests(adev);
+		if (r) {
+			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+			r = amdgpu_suspend(adev);
+			goto retry;
+		}
+
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = adev->rings[i];
 			if (!ring)
 				continue;
-
-			amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
-			ring_sizes[i] = 0;
-			ring_data[i] = NULL;
-		}
-
-		r = amdgpu_ib_ring_tests(adev);
-		if (r) {
-			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
-			if (saved) {
-				saved = false;
-				r = amdgpu_suspend(adev);
-				goto retry;
-			}
+			amd_sched_job_recovery(&ring->sched);
+			kthread_unpark(ring->sched.thread);
 		}
 	} else {
-		amdgpu_fence_driver_force_completion(adev);
+		dev_err(adev->dev, "asic resume failed (%d).\n", r);
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			if (adev->rings[i])
-				kfree(ring_data[i]);
+			if (adev->rings[i]) {
+				kthread_unpark(adev->rings[i]->sched.thread);
+			}
 		}
 	}
 
1929 1998
@@ -1934,13 +2003,11 @@ retry:
1934 /* bad news, how to tell it to userspace ? */ 2003 /* bad news, how to tell it to userspace ? */
1935 dev_info(adev->dev, "GPU reset failed\n"); 2004 dev_info(adev->dev, "GPU reset failed\n");
1936 } 2005 }
2006 amdgpu_irq_gpu_reset_resume_helper(adev);
1937 2007
1938 return r; 2008 return r;
1939} 2009}
1940 2010
1941#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
1942#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
1943
1944void amdgpu_get_pcie_info(struct amdgpu_device *adev) 2011void amdgpu_get_pcie_info(struct amdgpu_device *adev)
1945{ 2012{
1946 u32 mask; 2013 u32 mask;
@@ -2094,20 +2161,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 	struct amdgpu_device *adev = f->f_inode->i_private;
 	ssize_t result = 0;
 	int r;
+	bool use_bank;
+	unsigned instance_bank, sh_bank, se_bank;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	if (*pos & (1ULL << 62)) {
+		se_bank = (*pos >> 24) & 0x3FF;
+		sh_bank = (*pos >> 34) & 0x3FF;
+		instance_bank = (*pos >> 44) & 0x3FF;
+		use_bank = 1;
+		*pos &= 0xFFFFFF;
+	} else {
+		use_bank = 0;
+	}
+
+	if (use_bank) {
+		if (sh_bank >= adev->gfx.config.max_sh_per_se ||
+		    se_bank >= adev->gfx.config.max_shader_engines)
+			return -EINVAL;
+		mutex_lock(&adev->grbm_idx_mutex);
+		amdgpu_gfx_select_se_sh(adev, se_bank,
+					sh_bank, instance_bank);
+	}
+
 	while (size) {
 		uint32_t value;
 
 		if (*pos > adev->rmmio_size)
-			return result;
+			goto end;
 
 		value = RREG32(*pos >> 2);
 		r = put_user(value, (uint32_t *)buf);
-		if (r)
-			return r;
+		if (r) {
+			result = r;
+			goto end;
+		}
 
 		result += 4;
 		buf += 4;
@@ -2115,6 +2205,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 		size -= 4;
 	}
 
+end:
+	if (use_bank) {
+		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+	}
+
 	return result;
 }
 
@@ -2314,6 +2410,68 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 	return result;
 }
 
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+	uint32_t *config, no_regs = 0;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+
+	/* version, increment each time something is added */
+	config[no_regs++] = 0;
+	config[no_regs++] = adev->gfx.config.max_shader_engines;
+	config[no_regs++] = adev->gfx.config.max_tile_pipes;
+	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+	config[no_regs++] = adev->gfx.config.max_sh_per_se;
+	config[no_regs++] = adev->gfx.config.max_backends_per_se;
+	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+	config[no_regs++] = adev->gfx.config.max_gprs;
+	config[no_regs++] = adev->gfx.config.max_gs_threads;
+	config[no_regs++] = adev->gfx.config.max_hw_contexts;
+	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+	config[no_regs++] = adev->gfx.config.num_tile_pipes;
+	config[no_regs++] = adev->gfx.config.backend_enable_mask;
+	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+	config[no_regs++] = adev->gfx.config.num_gpus;
+	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+	config[no_regs++] = adev->gfx.config.gb_addr_config;
+	config[no_regs++] = adev->gfx.config.num_rbs;
+
+	while (size && (*pos < no_regs * 4)) {
+		uint32_t value;
+
+		value = config[*pos >> 2];
+		r = put_user(value, (uint32_t *)buf);
+		if (r) {
+			kfree(config);
+			return r;
+		}
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	kfree(config);
+	return result;
+}
+
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
@@ -2339,11 +2497,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
 	.llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_gca_config_read,
+	.llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
 	&amdgpu_debugfs_regs_didt_fops,
 	&amdgpu_debugfs_regs_pcie_fops,
 	&amdgpu_debugfs_regs_smc_fops,
+	&amdgpu_debugfs_gca_config_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -2351,6 +2516,7 @@ static const char *debugfs_regs_names[] = {
 	"amdgpu_regs_didt",
 	"amdgpu_regs_pcie",
 	"amdgpu_regs_smc",
+	"amdgpu_gca_config",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
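
For readers who want to exercise the new banked register path from userspace, the sketch below encodes the debugfs file offset the same way amdgpu_debugfs_regs_read() above decodes it: bit 62 enables bank selection, SE in bits 24-33, SH in bits 34-43, instance in bits 44-53, and the register byte offset in the low 24 bits. This is a minimal illustration only; the debugfs path and the example register offset are assumptions, not part of this commit.

/*
 * Hypothetical userspace sketch: read one dword through the amdgpu_regs
 * debugfs file using the banked offset encoding introduced in this diff.
 * The path and the register offset below are assumed example values.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static off_t amdgpu_banked_offset(uint32_t reg_byte_off, uint64_t se,
				  uint64_t sh, uint64_t instance)
{
	/* mirrors the decode in amdgpu_debugfs_regs_read() */
	return (off_t)((1ULL << 62) |
		       ((uint64_t)reg_byte_off & 0xFFFFFF) |
		       ((se & 0x3FF) << 24) |
		       ((sh & 0x3FF) << 34) |
		       ((instance & 0x3FF) << 44));
}

int main(void)
{
	/* assumed path; the dri index depends on the system */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
	uint32_t value;

	if (fd < 0)
		return 1;
	/* 0x1000 is an arbitrary register byte offset; SE 0, SH 0, instance 0 */
	if (pread(fd, &value, sizeof(value),
		  amdgpu_banked_offset(0x1000, 0, 0, 0)) == sizeof(value))
		printf("reg = 0x%08x\n", value);
	close(fd);
	return 0;
}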