 drivers/gpu/drm/amd/amdgpu/amdgpu.h               |  22 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c |   4 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c            |   7 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c           |  10 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c          |   4 +-
 drivers/gpu/drm/amd/amdgpu/cik_sdma.c             | 130 +++++++--------
 drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c            | 156 +++++++----------
 drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c            | 166 ++++++++---------
 9 files changed, 245 insertions(+), 258 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6647fb26ef25..afc984806c4c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1708,7 +1708,7 @@ struct amdgpu_vce {
 /*
  * SDMA
  */
-struct amdgpu_sdma {
+struct amdgpu_sdma_instance {
 	/* SDMA firmware */
 	const struct firmware	*fw;
 	uint32_t		fw_version;
@@ -1718,6 +1718,13 @@ struct amdgpu_sdma {
 	bool			burst_nop;
 };
 
+struct amdgpu_sdma {
+	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+	struct amdgpu_irq_src	trap_irq;
+	struct amdgpu_irq_src	illegal_inst_irq;
+	int			num_instances;
+};
+
 /*
  * Firmware
  */
@@ -2064,9 +2071,7 @@ struct amdgpu_device {
 	struct amdgpu_gfx		gfx;
 
 	/* sdma */
-	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
-	struct amdgpu_irq_src		sdma_trap_irq;
-	struct amdgpu_irq_src		sdma_illegal_inst_irq;
+	struct amdgpu_sdma		sdma;
 
 	/* uvd */
 	bool				has_uvd;
@@ -2203,17 +2208,18 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 	ring->ring_free_dw--;
 }
 
-static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+static inline struct amdgpu_sdma_instance *
+amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	int i;
 
-	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
-		if (&adev->sdma[i].ring == ring)
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		if (&adev->sdma.instance[i].ring == ring)
 			break;
 
 	if (i < AMDGPU_MAX_SDMA_INSTANCES)
-		return &adev->sdma[i];
+		return &adev->sdma.instance[i];
 	else
 		return NULL;
 }
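The helper above is the pattern the rest of the series leans on: ring code no longer indexes `adev->sdma[]` directly but resolves its own `amdgpu_sdma_instance`. A minimal sketch of a caller, assuming the declarations above; the function name `my_pad_with_nops` is illustrative, not from the patch:

```c
/* Illustrative only: resolve the per-instance SDMA state for a ring and
 * use it; mirrors how the *_ring_insert_nop() callbacks below consume
 * amdgpu_get_sdma_instance(). */
static void my_pad_with_nops(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	uint32_t i;

	if (!sdma)
		return; /* ring does not belong to any SDMA instance */

	/* sdma->burst_nop (fw >= 20) would allow one burst NOP instead */
	for (i = 0; i < count; i++)
		amdgpu_ring_write(ring, ring->nop);
}
```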
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index dd2037bc0b4a..0e1376317683 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -649,12 +649,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 
 	case KGD_ENGINE_SDMA1:
 		hdr = (const union amdgpu_firmware_header *)
-							adev->sdma[0].fw->data;
+						adev->sdma.instance[0].fw->data;
 		break;
 
 	case KGD_ENGINE_SDMA2:
 		hdr = (const union amdgpu_firmware_header *)
-							adev->sdma[1].fw->data;
+						adev->sdma.instance[1].fw->data;
 		break;
 
 	default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index dfd1d503bccf..79fa5c7de856 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -523,12 +523,12 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 
 	case KGD_ENGINE_SDMA1:
 		hdr = (const union amdgpu_firmware_header *)
-							adev->sdma[0].fw->data;
+						adev->sdma.instance[0].fw->data;
 		break;
 
 	case KGD_ENGINE_SDMA2:
 		hdr = (const union amdgpu_firmware_header *)
-							adev->sdma[1].fw->data;
+						adev->sdma.instance[1].fw->data;
 		break;
 
 	default:
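Both KFD backends read the SDMA microcode header through the same cast; if one wanted to factor it out, a shared helper might look like this (hypothetical, not part of the patch, which keeps the cast open-coded in both files):

```c
/* Hypothetical helper, assuming the types used in the hunks above. */
static const union amdgpu_firmware_header *
my_sdma_fw_header(struct amdgpu_device *adev, int instance)
{
	return (const union amdgpu_firmware_header *)
		adev->sdma.instance[instance].fw->data;
}
```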
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 749420f1ea6f..29fc45ce64dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -104,10 +104,11 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 		}
 		break;
 	case AMDGPU_HW_IP_DMA:
-		if (ring < 2) {
-			*out_ring = &adev->sdma[ring].ring;
+		if (ring < adev->sdma.num_instances) {
+			*out_ring = &adev->sdma.instance[ring].ring;
 		} else {
-			DRM_ERROR("only two SDMA rings are supported\n");
+			DRM_ERROR("only %d SDMA rings are supported\n",
+				  adev->sdma.num_instances);
 			return -EINVAL;
 		}
 		break;
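The bounds check now follows the probed hardware rather than a literal 2. The same validation, pulled out as a standalone sketch (the helper name is illustrative):

```c
/* Sketch of the validation the hunk performs on a userspace ring index. */
static int my_get_sdma_ring(struct amdgpu_device *adev, u32 ring,
			    struct amdgpu_ring **out_ring)
{
	if (ring >= adev->sdma.num_instances)
		return -EINVAL;
	*out_ring = &adev->sdma.instance[ring].ring;
	return 0;
}
```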
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 78233225dd36..3f5f2d58ad94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -218,8 +218,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		break;
 	case AMDGPU_HW_IP_DMA:
 		type = AMD_IP_BLOCK_TYPE_SDMA;
-		ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
-		ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
 		ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
 		ib_size_alignment = 1;
 		break;
@@ -341,10 +341,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		fw_info.feature = 0;
 		break;
 	case AMDGPU_INFO_FW_SDMA:
-		if (info->query_fw.index >= 2)
+		if (info->query_fw.index >= adev->sdma.num_instances)
 			return -EINVAL;
-		fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
-		fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
+		fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
+		fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
 		break;
 	default:
 		return -EINVAL;
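The first hunk builds the availability bitmask generically: bit i is set when instance i's ring is ready. A worked example of what the loop computes, assuming two instances:

```c
/* Assuming num_instances == 2:
 *   both rings ready      -> ring_mask = (1 << 0) | (1 << 1) = 0x3
 *   only instance 0 ready -> ring_mask = 0x1
 *   neither ready         -> ring_mask = 0x0
 */
u32 ring_mask = 0;
int i;

for (i = 0; i < adev->sdma.num_instances; i++)
	ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
```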
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 30dce235ddeb..b13a74b273a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -540,8 +540,8 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
 static int amdgpu_gfx_index = offsetof(struct amdgpu_device, gfx.gfx_ring[0]);
 static int cayman_cp1_index = offsetof(struct amdgpu_device, gfx.compute_ring[0]);
 static int cayman_cp2_index = offsetof(struct amdgpu_device, gfx.compute_ring[1]);
-static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma[0].ring);
-static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma[1].ring);
+static int amdgpu_dma1_index = offsetof(struct amdgpu_device, sdma.instance[0].ring);
+static int amdgpu_dma2_index = offsetof(struct amdgpu_device, sdma.instance[1].ring);
 static int r600_uvd_index = offsetof(struct amdgpu_device, uvd.ring);
 static int si_vce1_index = offsetof(struct amdgpu_device, vce.ring[0]);
 static int si_vce2_index = offsetof(struct amdgpu_device, vce.ring[1]);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 9ea9de457da3..814598e76c98 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -96,7 +96,7 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
 	char fw_name[30];
-	int err, i;
+	int err = 0, i;
 
 	DRM_DEBUG("\n");
 
@@ -119,24 +119,24 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
 	default: BUG();
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		if (i == 0)
 			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
 		else
 			snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
-		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 		if (err)
 			goto out;
-		err = amdgpu_ucode_validate(adev->sdma[i].fw);
+		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 	}
 out:
 	if (err) {
 		printk(KERN_ERR
 		       "cik_sdma: Failed to load firmware \"%s\"\n",
 		       fw_name);
-		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-			release_firmware(adev->sdma[i].fw);
-			adev->sdma[i].fw = NULL;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			release_firmware(adev->sdma.instance[i].fw);
+			adev->sdma.instance[i].fw = NULL;
 		}
 	}
 	return err;
@@ -168,7 +168,7 @@ static uint32_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
 static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
 	return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
 }
@@ -183,14 +183,14 @@ static uint32_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
 static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u32 me = (ring == &adev->sdma[0].ring) ? 0 : 1;
+	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
 
 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], (ring->wptr << 2) & 0x3fffc);
 }
 
 static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -248,7 +248,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 			  SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
 	u32 ref_and_mask;
 
-	if (ring == &ring->adev->sdma[0].ring)
+	if (ring == &ring->adev->sdma.instance[0].ring)
 		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
 	else
 		ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -327,8 +327,8 @@ static bool cik_sdma_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl;
 	int i;
 
@@ -336,7 +336,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 	    (adev->mman.buffer_funcs_ring == sdma1))
 		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 		rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -376,7 +376,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
 		cik_sdma_rlc_stop(adev);
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		me_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 		if (enable)
 			me_cntl &= ~SDMA0_F32_CNTL__HALT_MASK;
@@ -402,8 +402,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 	u32 wb_offset;
 	int i, j, r;
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		ring = &adev->sdma[i].ring;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		wb_offset = (ring->rptr_offs * 4);
 
 		mutex_lock(&adev->srbm_mutex);
@@ -502,26 +502,25 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
 	u32 fw_size;
 	int i, j;
 
-	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-		return -EINVAL;
-
 	/* halt the MEs */
 	cik_sdma_enable(adev, false);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (!adev->sdma.instance[i].fw)
+			return -EINVAL;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-		if (adev->sdma[i].feature_version >= 20)
-			adev->sdma[i].burst_nop = true;
+		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma.instance[i].feature_version >= 20)
+			adev->sdma.instance[i].burst_nop = true;
 		fw_data = (const __le32 *)
-			(adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+			(adev->sdma.instance[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
 		for (j = 0; j < fw_size; j++)
 			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
+		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 	}
 
 	return 0;
@@ -830,7 +829,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void cik_sdma_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
 	u32 pad_count;
 	int i;
 
@@ -934,6 +933,8 @@ static int cik_sdma_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
 	cik_sdma_set_ring_funcs(adev);
 	cik_sdma_set_irq_funcs(adev);
 	cik_sdma_set_buffer_funcs(adev);
@@ -946,7 +947,7 @@ static int cik_sdma_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	int r;
+	int r, i;
 
 	r = cik_sdma_init_microcode(adev);
 	if (r) {
@@ -955,43 +956,33 @@ static int cik_sdma_sw_init(void *handle)
 	}
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
-	if (r)
-		return r;
-
-	ring = &adev->sdma[0].ring;
-	ring->ring_obj = NULL;
-
-	ring = &adev->sdma[1].ring;
-	ring->ring_obj = NULL;
-
-	ring = &adev->sdma[0].ring;
-	sprintf(ring->name, "sdma0");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-			     AMDGPU_RING_TYPE_SDMA);
+	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
-	ring = &adev->sdma[1].ring;
-	sprintf(ring->name, "sdma1");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-			     AMDGPU_RING_TYPE_SDMA);
-	if (r)
-		return r;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		ring->ring_obj = NULL;
+		sprintf(ring->name, "sdma%d", i);
+		r = amdgpu_ring_init(adev, ring, 256 * 1024,
+				     SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0), 0xf,
+				     &adev->sdma.trap_irq,
+				     (i == 0) ?
+				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+				     AMDGPU_RING_TYPE_SDMA);
+		if (r)
+			return r;
+	}
 
 	return r;
 }
@@ -999,9 +990,10 @@ static int cik_sdma_sw_init(void *handle)
 static int cik_sdma_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
 
-	amdgpu_ring_fini(&adev->sdma[0].ring);
-	amdgpu_ring_fini(&adev->sdma[1].ring);
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
 	return 0;
 }
@@ -1078,7 +1070,7 @@ static void cik_sdma_print_status(void *handle)
 	dev_info(adev->dev, "CIK SDMA registers\n");
 	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
 		 RREG32(mmSRBM_STATUS2));
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
 			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
 		dev_info(adev->dev, "  SDMA%d_ME_CNTL=0x%08X\n",
@@ -1223,7 +1215,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
 	case 0:
 		switch (queue_id) {
 		case 0:
-			amdgpu_fence_process(&adev->sdma[0].ring);
+			amdgpu_fence_process(&adev->sdma.instance[0].ring);
 			break;
 		case 1:
 			/* XXX compute */
@@ -1236,7 +1228,7 @@ static int cik_sdma_process_trap_irq(struct amdgpu_device *adev,
 	case 1:
 		switch (queue_id) {
 		case 0:
-			amdgpu_fence_process(&adev->sdma[1].ring);
+			amdgpu_fence_process(&adev->sdma.instance[1].ring);
 			break;
 		case 1:
 			/* XXX compute */
@@ -1334,8 +1326,10 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma[0].ring.funcs = &cik_sdma_ring_funcs;
-	adev->sdma[1].ring.funcs = &cik_sdma_ring_funcs;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
@@ -1349,9 +1343,9 @@ static const struct amdgpu_irq_src_funcs cik_sdma_illegal_inst_irq_funcs = {
 
 static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-	adev->sdma_trap_irq.funcs = &cik_sdma_trap_irq_funcs;
-	adev->sdma_illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
+	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+	adev->sdma.trap_irq.funcs = &cik_sdma_trap_irq_funcs;
+	adev->sdma.illegal_inst_irq.funcs = &cik_sdma_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1416,7 +1410,7 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
 {
 	if (adev->mman.buffer_funcs == NULL) {
 		adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
-		adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 	}
 }
 
@@ -1431,7 +1425,7 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
-		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
 		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
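cik_sdma.c establishes the shape every SDMA IP version follows after this patch: early_init records the instance count once, and every later loop keys off it, leaving SDMA_MAX_INSTANCE as the array bound only. Condensed sketch of that contract (the function name is illustrative; the real callbacks take a void *handle):

```c
/* The per-ASIC contract: publish the instance count before any other
 * callback touches adev->sdma. */
static int my_sdma_early_init(struct amdgpu_device *adev)
{
	adev->sdma.num_instances = SDMA_MAX_INSTANCE; /* 2 on these parts */
	return 0;
}
```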
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 14e87234171a..f8b868c7c496 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -118,7 +118,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
 	char fw_name[30];
-	int err, i;
+	int err = 0, i;
 	struct amdgpu_firmware_info *info = NULL;
 	const struct common_firmware_header *header = NULL;
 	const struct sdma_firmware_header_v1_0 *hdr;
@@ -132,27 +132,27 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
 	default: BUG();
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		if (i == 0)
 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
 		else
 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
-		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 		if (err)
 			goto out;
-		err = amdgpu_ucode_validate(adev->sdma[i].fw);
+		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 		if (err)
 			goto out;
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-		if (adev->sdma[i].feature_version >= 20)
-			adev->sdma[i].burst_nop = true;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma.instance[i].feature_version >= 20)
+			adev->sdma.instance[i].burst_nop = true;
 
 		if (adev->firmware.smu_load) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
 			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
-			info->fw = adev->sdma[i].fw;
+			info->fw = adev->sdma.instance[i].fw;
 			header = (const struct common_firmware_header *)info->fw->data;
 			adev->firmware.fw_size +=
 				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@@ -164,9 +164,9 @@ out:
 		printk(KERN_ERR
 		       "sdma_v2_4: Failed to load firmware \"%s\"\n",
 		       fw_name);
-		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-			release_firmware(adev->sdma[i].fw);
-			adev->sdma[i].fw = NULL;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			release_firmware(adev->sdma.instance[i].fw);
+			adev->sdma.instance[i].fw = NULL;
 		}
 	}
 	return err;
@@ -199,7 +199,7 @@ static uint32_t sdma_v2_4_ring_get_rptr(struct amdgpu_ring *ring)
 static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 	u32 wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
 
 	return wptr;
@@ -215,14 +215,14 @@ static uint32_t sdma_v2_4_ring_get_wptr(struct amdgpu_ring *ring)
 static void sdma_v2_4_ring_set_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+	int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
 	WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 }
 
 static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -284,7 +284,7 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask = 0;
 
-	if (ring == &ring->adev->sdma[0].ring)
+	if (ring == &ring->adev->sdma.instance[0].ring)
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
 	else
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -368,8 +368,8 @@ static bool sdma_v2_4_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
@@ -377,7 +377,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
 	    (adev->mman.buffer_funcs_ring == sdma1))
 		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -419,7 +419,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
 		sdma_v2_4_rlc_stop(adev);
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 		if (enable)
 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@@ -445,8 +445,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
 	u32 wb_offset;
 	int i, j, r;
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		ring = &adev->sdma[i].ring;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		wb_offset = (ring->rptr_offs * 4);
 
 		mutex_lock(&adev->srbm_mutex);
@@ -545,29 +545,23 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
 	const __le32 *fw_data;
 	u32 fw_size;
 	int i, j;
-	bool smc_loads_fw = false; /* XXX fix me */
-
-	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-		return -EINVAL;
 
 	/* halt the MEs */
 	sdma_v2_4_enable(adev, false);
 
-	if (smc_loads_fw) {
-		/* XXX query SMC for fw load complete */
-	} else {
-		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-			hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-			amdgpu_ucode_print_sdma_hdr(&hdr->header);
-			fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
-			fw_data = (const __le32 *)
-				(adev->sdma[i].fw->data +
-				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-			WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
-			for (j = 0; j < fw_size; j++)
-				WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
-			WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version);
-		}
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (!adev->sdma.instance[i].fw)
+			return -EINVAL;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+		amdgpu_ucode_print_sdma_hdr(&hdr->header);
+		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
+		fw_data = (const __le32 *)
+			(adev->sdma.instance[i].fw->data +
+			 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
+		for (j = 0; j < fw_size; j++)
+			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
+		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
 	}
 
 	return 0;
@@ -894,7 +888,7 @@ static void sdma_v2_4_vm_set_pte_pde(struct amdgpu_ib *ib,
  */
 static void sdma_v2_4_vm_pad_ib(struct amdgpu_ib *ib)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring);
 	u32 pad_count;
 	int i;
 
@@ -952,6 +946,8 @@ static int sdma_v2_4_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+
 	sdma_v2_4_set_ring_funcs(adev);
 	sdma_v2_4_set_buffer_funcs(adev);
 	sdma_v2_4_set_vm_pte_funcs(adev);
@@ -963,21 +959,21 @@ static int sdma_v2_4_early_init(void *handle)
 static int sdma_v2_4_sw_init(void *handle)
 {
 	struct amdgpu_ring *ring;
-	int r;
+	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq);
+	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
@@ -987,31 +983,20 @@ static int sdma_v2_4_sw_init(void *handle)
 		return r;
 	}
 
-	ring = &adev->sdma[0].ring;
-	ring->ring_obj = NULL;
-	ring->use_doorbell = false;
-
-	ring = &adev->sdma[1].ring;
-	ring->ring_obj = NULL;
-	ring->use_doorbell = false;
-
-	ring = &adev->sdma[0].ring;
-	sprintf(ring->name, "sdma0");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0,
-			     AMDGPU_RING_TYPE_SDMA);
-	if (r)
-		return r;
-
-	ring = &adev->sdma[1].ring;
-	sprintf(ring->name, "sdma1");
-	r = amdgpu_ring_init(adev, ring, 256 * 1024,
-			     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
-			     &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1,
-			     AMDGPU_RING_TYPE_SDMA);
-	if (r)
-		return r;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
+		ring->ring_obj = NULL;
+		ring->use_doorbell = false;
+		sprintf(ring->name, "sdma%d", i);
+		r = amdgpu_ring_init(adev, ring, 256 * 1024,
+				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
+				     &adev->sdma.trap_irq,
+				     (i == 0) ?
+				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
+				     AMDGPU_RING_TYPE_SDMA);
+		if (r)
+			return r;
+	}
 
 	return r;
 }
@@ -1019,9 +1004,10 @@ static int sdma_v2_4_sw_init(void *handle)
 static int sdma_v2_4_sw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
 
-	amdgpu_ring_fini(&adev->sdma[0].ring);
-	amdgpu_ring_fini(&adev->sdma[1].ring);
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
 
 	return 0;
 }
@@ -1100,7 +1086,7 @@ static void sdma_v2_4_print_status(void *handle)
 	dev_info(adev->dev, "VI SDMA registers\n");
 	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
 		 RREG32(mmSRBM_STATUS2));
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
 			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
 		dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
@@ -1243,7 +1229,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
 	case 0:
 		switch (queue_id) {
 		case 0:
-			amdgpu_fence_process(&adev->sdma[0].ring);
+			amdgpu_fence_process(&adev->sdma.instance[0].ring);
 			break;
 		case 1:
 			/* XXX compute */
@@ -1256,7 +1242,7 @@ static int sdma_v2_4_process_trap_irq(struct amdgpu_device *adev,
 	case 1:
 		switch (queue_id) {
 		case 0:
-			amdgpu_fence_process(&adev->sdma[1].ring);
+			amdgpu_fence_process(&adev->sdma.instance[1].ring);
 			break;
 		case 1:
 			/* XXX compute */
@@ -1345,8 +1331,10 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
 
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma[0].ring.funcs = &sdma_v2_4_ring_funcs;
-	adev->sdma[1].ring.funcs = &sdma_v2_4_ring_funcs;
+	int i;
+
+	for (i = 0; i < adev->sdma.num_instances; i++)
+		adev->sdma.instance[i].ring.funcs = &sdma_v2_4_ring_funcs;
 }
 
 static const struct amdgpu_irq_src_funcs sdma_v2_4_trap_irq_funcs = {
@@ -1360,9 +1348,9 @@ static const struct amdgpu_irq_src_funcs sdma_v2_4_illegal_inst_irq_funcs = {
 
 static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
-	adev->sdma_trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
-	adev->sdma_illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
+	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+	adev->sdma.trap_irq.funcs = &sdma_v2_4_trap_irq_funcs;
+	adev->sdma.illegal_inst_irq.funcs = &sdma_v2_4_illegal_inst_irq_funcs;
 }
 
 /**
@@ -1428,7 +1416,7 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
 {
 	if (adev->mman.buffer_funcs == NULL) {
 		adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
-		adev->mman.buffer_funcs_ring = &adev->sdma[0].ring;
+		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
 	}
 }
 
@@ -1443,7 +1431,7 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
 {
 	if (adev->vm_manager.vm_pte_funcs == NULL) {
 		adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
-		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring;
+		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
 		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
 	}
 }
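A recurring detail in the init_microcode() hunks: firmware files are named per instance, `<chip>_sdma.bin` for instance 0 and `<chip>_sdma1.bin` for instance 1. Generalized into a sketch (illustrative; the patch itself only ever emits those two names, since num_instances is 2 everywhere here):

```c
/* Illustrative generalization of the firmware-name selection. */
static void my_sdma_fw_name(char *buf, size_t len,
			    const char *chip_name, int instance)
{
	if (instance == 0)
		snprintf(buf, len, "amdgpu/%s_sdma.bin", chip_name);
	else
		snprintf(buf, len, "amdgpu/%s_sdma%d.bin", chip_name, instance);
}
```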
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 9bfe92df15f7..670555a45da9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -184,7 +184,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
 	char fw_name[30];
-	int err, i;
+	int err = 0, i;
 	struct amdgpu_firmware_info *info = NULL;
 	const struct common_firmware_header *header = NULL;
 	const struct sdma_firmware_header_v1_0 *hdr;
@@ -204,27 +204,27 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
 	default: BUG();
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		if (i == 0)
 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
 		else
 			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
-		err = request_firmware(&adev->sdma[i].fw, fw_name, adev->dev);
+		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
 		if (err)
 			goto out;
-		err = amdgpu_ucode_validate(adev->sdma[i].fw);
+		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
 		if (err)
 			goto out;
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
-		adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
-		adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
-		if (adev->sdma[i].feature_version >= 20)
-			adev->sdma[i].burst_nop = true;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
+		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
+		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
+		if (adev->sdma.instance[i].feature_version >= 20)
+			adev->sdma.instance[i].burst_nop = true;
 
 		if (adev->firmware.smu_load) {
 			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
 			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
-			info->fw = adev->sdma[i].fw;
+			info->fw = adev->sdma.instance[i].fw;
 			header = (const struct common_firmware_header *)info->fw->data;
 			adev->firmware.fw_size +=
 				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
@@ -235,9 +235,9 @@ out:
 		printk(KERN_ERR
 		       "sdma_v3_0: Failed to load firmware \"%s\"\n",
 		       fw_name);
-		for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-			release_firmware(adev->sdma[i].fw);
-			adev->sdma[i].fw = NULL;
+		for (i = 0; i < adev->sdma.num_instances; i++) {
+			release_firmware(adev->sdma.instance[i].fw);
+			adev->sdma.instance[i].fw = NULL;
 		}
 	}
 	return err;
@@ -276,7 +276,7 @@ static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
 		/* XXX check if swapping is necessary on BE */
 		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
 	} else {
-		int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
 		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
 	}
@@ -300,7 +300,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 		adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
 		WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
 	} else {
-		int me = (ring == &ring->adev->sdma[0].ring) ? 0 : 1;
+		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
 
 		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
 	}
@@ -308,7 +308,7 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 
 static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
 {
-	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
+	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 	int i;
 
 	for (i = 0; i < count; i++)
@@ -369,7 +369,7 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 {
 	u32 ref_and_mask = 0;
 
-	if (ring == &ring->adev->sdma[0].ring)
+	if (ring == &ring->adev->sdma.instance[0].ring)
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
 	else
 		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);
@@ -454,8 +454,8 @@ static bool sdma_v3_0_ring_emit_semaphore(struct amdgpu_ring *ring,
  */
 static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *sdma0 = &adev->sdma[0].ring;
-	struct amdgpu_ring *sdma1 = &adev->sdma[1].ring;
+	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
+	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
 	u32 rb_cntl, ib_cntl;
 	int i;
 
@@ -463,7 +463,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
 	    (adev->mman.buffer_funcs_ring == sdma1))
 		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -500,7 +500,7 @@ static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
 	u32 f32_cntl;
 	int i;
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
 		if (enable)
 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
@@ -530,7 +530,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
 		sdma_v3_0_rlc_stop(adev);
 	}
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
+	for (i = 0; i < adev->sdma.num_instances; i++) {
 		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
 		if (enable)
 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
@@ -557,8 +557,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 	u32 doorbell;
 	int i, j, r;
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		ring = &adev->sdma[i].ring;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		ring = &adev->sdma.instance[i].ring;
 		wb_offset = (ring->rptr_offs * 4);
 
 		mutex_lock(&adev->srbm_mutex);
@@ -669,23 +669,22 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
 	u32 fw_size;
 	int i, j;
 
-	if (!adev->sdma[0].fw || !adev->sdma[1].fw)
-		return -EINVAL;
-
 	/* halt the MEs */
 	sdma_v3_0_enable(adev, false);
 
-	for (i = 0; i < SDMA_MAX_INSTANCE; i++) {
-		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
+	for (i = 0; i < adev->sdma.num_instances; i++) {
+		if (!adev->sdma.instance[i].fw)
+			return -EINVAL;
+		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
 		amdgpu_ucode_print_sdma_hdr(&hdr->header);
 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
| 682 | fw_data = (const __le32 *) | 681 | fw_data = (const __le32 *) |
| 683 | (adev->sdma[i].fw->data + | 682 | (adev->sdma.instance[i].fw->data + |
| 684 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | 683 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); |
| 685 | WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); | 684 | WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); |
| 686 | for (j = 0; j < fw_size; j++) | 685 | for (j = 0; j < fw_size; j++) |
| 687 | WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); | 686 | WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); |
| 688 | WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma[i].fw_version); | 687 | WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version); |
| 689 | } | 688 | } |
| 690 | 689 | ||
| 691 | return 0; | 690 | return 0; |
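
Moving the firmware check inside the loop also changes behaviour slightly: a device that only populates instance 0 no longer fails because instance 1 has no firmware attached. The upload sequence itself is untouched; annotated, the per-instance body is:

	WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);	/* rewind the ucode write pointer */
	for (j = 0; j < fw_size; j++)				/* stream the ucode words */
		WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i],
		       le32_to_cpup(fw_data++));
	WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i],		/* leave the fw version in ADDR,
								 * per the existing convention */
	       adev->sdma.instance[i].fw_version);
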
| @@ -701,21 +700,21 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev) | |||
| 701 | */ | 700 | */ |
| 702 | static int sdma_v3_0_start(struct amdgpu_device *adev) | 701 | static int sdma_v3_0_start(struct amdgpu_device *adev) |
| 703 | { | 702 | { |
| 704 | int r; | 703 | int r, i; |
| 705 | 704 | ||
| 706 | if (!adev->firmware.smu_load) { | 705 | if (!adev->firmware.smu_load) { |
| 707 | r = sdma_v3_0_load_microcode(adev); | 706 | r = sdma_v3_0_load_microcode(adev); |
| 708 | if (r) | 707 | if (r) |
| 709 | return r; | 708 | return r; |
| 710 | } else { | 709 | } else { |
| 711 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | 710 | for (i = 0; i < adev->sdma.num_instances; i++) { |
| 712 | AMDGPU_UCODE_ID_SDMA0); | 711 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, |
| 713 | if (r) | 712 | (i == 0) ? |
| 714 | return -EINVAL; | 713 | AMDGPU_UCODE_ID_SDMA0 : |
| 715 | r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, | 714 | AMDGPU_UCODE_ID_SDMA1); |
| 716 | AMDGPU_UCODE_ID_SDMA1); | 715 | if (r) |
| 717 | if (r) | 716 | return -EINVAL; |
| 718 | return -EINVAL; | 717 | } |
| 719 | } | 718 | } |
| 720 | 719 | ||
| 721 | /* unhalt the MEs */ | 720 | /* unhalt the MEs */ |
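
The SMU path now checks each instance in a loop, picking the ucode ID with a ternary. Should a third engine ever appear, a small table would scale more cleanly; a hypothetical variant, assuming the enum AMDGPU_UCODE_ID values already used in this function:

	static const enum AMDGPU_UCODE_ID sdma_ucode_ids[] = {
		AMDGPU_UCODE_ID_SDMA0,
		AMDGPU_UCODE_ID_SDMA1,
	};

	for (i = 0; i < adev->sdma.num_instances; i++) {
		r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
							sdma_ucode_ids[i]);
		if (r)
			return -EINVAL;
	}
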
| @@ -1013,7 +1012,7 @@ static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, | |||
| 1013 | */ | 1012 | */ |
| 1014 | static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib) | 1013 | static void sdma_v3_0_vm_pad_ib(struct amdgpu_ib *ib) |
| 1015 | { | 1014 | { |
| 1016 | struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ib->ring); | 1015 | struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ib->ring); |
| 1017 | u32 pad_count; | 1016 | u32 pad_count; |
| 1018 | int i; | 1017 | int i; |
| 1019 | 1018 | ||
| @@ -1071,6 +1070,12 @@ static int sdma_v3_0_early_init(void *handle) | |||
| 1071 | { | 1070 | { |
| 1072 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1071 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1073 | 1072 | ||
| 1073 | switch (adev->asic_type) { | ||
| 1074 | default: | ||
| 1075 | adev->sdma.num_instances = SDMA_MAX_INSTANCE; | ||
| 1076 | break; | ||
| 1077 | } | ||
| 1078 | |||
| 1074 | sdma_v3_0_set_ring_funcs(adev); | 1079 | sdma_v3_0_set_ring_funcs(adev); |
| 1075 | sdma_v3_0_set_buffer_funcs(adev); | 1080 | sdma_v3_0_set_buffer_funcs(adev); |
| 1076 | sdma_v3_0_set_vm_pte_funcs(adev); | 1081 | sdma_v3_0_set_vm_pte_funcs(adev); |
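
Routing num_instances through a switch on asic_type means a single-engine part only needs a new case label here. Illustrative only (this patch ships just the default arm; CHIP_STONEY is an assumed example, not something the patch adds):

	switch (adev->asic_type) {
	case CHIP_STONEY:	/* hypothetical single-SDMA part */
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}
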
| @@ -1082,21 +1087,21 @@ static int sdma_v3_0_early_init(void *handle) | |||
| 1082 | static int sdma_v3_0_sw_init(void *handle) | 1087 | static int sdma_v3_0_sw_init(void *handle) |
| 1083 | { | 1088 | { |
| 1084 | struct amdgpu_ring *ring; | 1089 | struct amdgpu_ring *ring; |
| 1085 | int r; | 1090 | int r, i; |
| 1086 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1091 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1087 | 1092 | ||
| 1088 | /* SDMA trap event */ | 1093 | /* SDMA trap event */ |
| 1089 | r = amdgpu_irq_add_id(adev, 224, &adev->sdma_trap_irq); | 1094 | r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq); |
| 1090 | if (r) | 1095 | if (r) |
| 1091 | return r; | 1096 | return r; |
| 1092 | 1097 | ||
| 1093 | /* SDMA Privileged inst */ | 1098 | /* SDMA Privileged inst */ |
| 1094 | r = amdgpu_irq_add_id(adev, 241, &adev->sdma_illegal_inst_irq); | 1099 | r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq); |
| 1095 | if (r) | 1100 | if (r) |
| 1096 | return r; | 1101 | return r; |
| 1097 | 1102 | ||
| 1098 | /* SDMA Privileged inst */ | 1103 | /* SDMA Privileged inst */ |
| 1099 | r = amdgpu_irq_add_id(adev, 247, &adev->sdma_illegal_inst_irq); | 1104 | r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq); |
| 1100 | if (r) | 1105 | if (r) |
| 1101 | return r; | 1106 | return r; |
| 1102 | 1107 | ||
| @@ -1106,33 +1111,23 @@ static int sdma_v3_0_sw_init(void *handle) | |||
| 1106 | return r; | 1111 | return r; |
| 1107 | } | 1112 | } |
| 1108 | 1113 | ||
| 1109 | ring = &adev->sdma[0].ring; | 1114 | for (i = 0; i < adev->sdma.num_instances; i++) { |
| 1110 | ring->ring_obj = NULL; | 1115 | ring = &adev->sdma.instance[i].ring; |
| 1111 | ring->use_doorbell = true; | 1116 | ring->ring_obj = NULL; |
| 1112 | ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE0; | 1117 | ring->use_doorbell = true; |
| 1113 | 1118 | ring->doorbell_index = (i == 0) ? | |
| 1114 | ring = &adev->sdma[1].ring; | 1119 | AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1; |
| 1115 | ring->ring_obj = NULL; | 1120 | |
| 1116 | ring->use_doorbell = true; | 1121 | sprintf(ring->name, "sdma%d", i); |
| 1117 | ring->doorbell_index = AMDGPU_DOORBELL_sDMA_ENGINE1; | 1122 | r = amdgpu_ring_init(adev, ring, 256 * 1024, |
| 1118 | 1123 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | |
| 1119 | ring = &adev->sdma[0].ring; | 1124 | &adev->sdma.trap_irq, |
| 1120 | sprintf(ring->name, "sdma0"); | 1125 | (i == 0) ? |
| 1121 | r = amdgpu_ring_init(adev, ring, 256 * 1024, | 1126 | AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1, |
| 1122 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | 1127 | AMDGPU_RING_TYPE_SDMA); |
| 1123 | &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP0, | 1128 | if (r) |
| 1124 | AMDGPU_RING_TYPE_SDMA); | 1129 | return r; |
| 1125 | if (r) | 1130 | } |
| 1126 | return r; | ||
| 1127 | |||
| 1128 | ring = &adev->sdma[1].ring; | ||
| 1129 | sprintf(ring->name, "sdma1"); | ||
| 1130 | r = amdgpu_ring_init(adev, ring, 256 * 1024, | ||
| 1131 | SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf, | ||
| 1132 | &adev->sdma_trap_irq, AMDGPU_SDMA_IRQ_TRAP1, | ||
| 1133 | AMDGPU_RING_TYPE_SDMA); | ||
| 1134 | if (r) | ||
| 1135 | return r; | ||
| 1136 | 1131 | ||
| 1137 | return r; | 1132 | return r; |
| 1138 | } | 1133 | } |
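
The two copy-pasted ring blocks collapse into one loop, with the ring name now generated at runtime via sprintf. Since ring->name is a fixed-size buffer in this era's struct amdgpu_ring (an assumption here; the patch does not touch that struct), a bounded write would be marginally safer:

	/* bounded variant of the name generation */
	snprintf(ring->name, sizeof(ring->name), "sdma%d", i);
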
| @@ -1140,9 +1135,10 @@ static int sdma_v3_0_sw_init(void *handle) | |||
| 1140 | static int sdma_v3_0_sw_fini(void *handle) | 1135 | static int sdma_v3_0_sw_fini(void *handle) |
| 1141 | { | 1136 | { |
| 1142 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 1137 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| 1138 | int i; | ||
| 1143 | 1139 | ||
| 1144 | amdgpu_ring_fini(&adev->sdma[0].ring); | 1140 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 1145 | amdgpu_ring_fini(&adev->sdma[1].ring); | 1141 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1146 | 1142 | ||
| 1147 | return 0; | 1143 | return 0; |
| 1148 | } | 1144 | } |
| @@ -1222,7 +1218,7 @@ static void sdma_v3_0_print_status(void *handle) | |||
| 1222 | dev_info(adev->dev, "VI SDMA registers\n"); | 1218 | dev_info(adev->dev, "VI SDMA registers\n"); |
| 1223 | dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", | 1219 | dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n", |
| 1224 | RREG32(mmSRBM_STATUS2)); | 1220 | RREG32(mmSRBM_STATUS2)); |
| 1225 | for (i = 0; i < SDMA_MAX_INSTANCE; i++) { | 1221 | for (i = 0; i < adev->sdma.num_instances; i++) { |
| 1226 | dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", | 1222 | dev_info(adev->dev, " SDMA%d_STATUS_REG=0x%08X\n", |
| 1227 | i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); | 1223 | i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i])); |
| 1228 | dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", | 1224 | dev_info(adev->dev, " SDMA%d_F32_CNTL=0x%08X\n", |
| @@ -1367,7 +1363,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev, | |||
| 1367 | case 0: | 1363 | case 0: |
| 1368 | switch (queue_id) { | 1364 | switch (queue_id) { |
| 1369 | case 0: | 1365 | case 0: |
| 1370 | amdgpu_fence_process(&adev->sdma[0].ring); | 1366 | amdgpu_fence_process(&adev->sdma.instance[0].ring); |
| 1371 | break; | 1367 | break; |
| 1372 | case 1: | 1368 | case 1: |
| 1373 | /* XXX compute */ | 1369 | /* XXX compute */ |
| @@ -1380,7 +1376,7 @@ static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev, | |||
| 1380 | case 1: | 1376 | case 1: |
| 1381 | switch (queue_id) { | 1377 | switch (queue_id) { |
| 1382 | case 0: | 1378 | case 0: |
| 1383 | amdgpu_fence_process(&adev->sdma[1].ring); | 1379 | amdgpu_fence_process(&adev->sdma.instance[1].ring); |
| 1384 | break; | 1380 | break; |
| 1385 | case 1: | 1381 | case 1: |
| 1386 | /* XXX compute */ | 1382 | /* XXX compute */ |
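
Both trap branches differ only in which ring they fence; with the instance array in place, the handler could index it directly. A hypothetical compaction, assuming the outer switch is on an instance id decoded from the IV entry (as its case labels suggest):

	if (queue_id == 0 && instance_id < adev->sdma.num_instances)
		amdgpu_fence_process(&adev->sdma.instance[instance_id].ring);
	/* queue_id 1/2 are the compute queues, still unhandled (XXX) */
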
| @@ -1468,8 +1464,10 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = { | |||
| 1468 | 1464 | ||
| 1469 | static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) | 1465 | static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev) |
| 1470 | { | 1466 | { |
| 1471 | adev->sdma[0].ring.funcs = &sdma_v3_0_ring_funcs; | 1467 | int i; |
| 1472 | adev->sdma[1].ring.funcs = &sdma_v3_0_ring_funcs; | 1468 | |
| 1469 | for (i = 0; i < adev->sdma.num_instances; i++) | ||
| 1470 | adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs; | ||
| 1473 | } | 1471 | } |
| 1474 | 1472 | ||
| 1475 | static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = { | 1473 | static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = { |
| @@ -1483,9 +1481,9 @@ static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = { | |||
| 1483 | 1481 | ||
| 1484 | static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) | 1482 | static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev) |
| 1485 | { | 1483 | { |
| 1486 | adev->sdma_trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; | 1484 | adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; |
| 1487 | adev->sdma_trap_irq.funcs = &sdma_v3_0_trap_irq_funcs; | 1485 | adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs; |
| 1488 | adev->sdma_illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs; | 1486 | adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs; |
| 1489 | } | 1487 | } |
| 1490 | 1488 | ||
| 1491 | /** | 1489 | /** |
| @@ -1551,7 +1549,7 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev) | |||
| 1551 | { | 1549 | { |
| 1552 | if (adev->mman.buffer_funcs == NULL) { | 1550 | if (adev->mman.buffer_funcs == NULL) { |
| 1553 | adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; | 1551 | adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; |
| 1554 | adev->mman.buffer_funcs_ring = &adev->sdma[0].ring; | 1552 | adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; |
| 1555 | } | 1553 | } |
| 1556 | } | 1554 | } |
| 1557 | 1555 | ||
| @@ -1566,7 +1564,7 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) | |||
| 1566 | { | 1564 | { |
| 1567 | if (adev->vm_manager.vm_pte_funcs == NULL) { | 1565 | if (adev->vm_manager.vm_pte_funcs == NULL) { |
| 1568 | adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; | 1566 | adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; |
| 1569 | adev->vm_manager.vm_pte_funcs_ring = &adev->sdma[0].ring; | 1567 | adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring; |
| 1570 | adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; | 1568 | adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true; |
| 1571 | } | 1569 | } |
| 1572 | } | 1570 | } |
