Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c')
 -rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 204
 1 file changed, 110 insertions, 94 deletions
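Context for the patch below: the AMDGPU_INFO_HW_IP_INFO query that this change factors out into amdgpu_hw_ip_info() is reached from userspace through the DRM_IOCTL_AMDGPU_INFO ioctl. The following sketch is illustrative only and not part of the patch; it assumes the standard amdgpu UAPI header as installed by libdrm, uses /dev/dri/renderD128 purely as an example render node, and trims error handling.

/*
 * Illustrative sketch (not part of the patch): query GFX HW IP info
 * through DRM_IOCTL_AMDGPU_INFO. The header path and the render node
 * below are assumptions; adjust for the local libdrm install.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <libdrm/amdgpu_drm.h>

int main(void)
{
	struct drm_amdgpu_info_hw_ip ip;
	struct drm_amdgpu_info request;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* example node */

	if (fd < 0)
		return 1;

	memset(&request, 0, sizeof(request));
	memset(&ip, 0, sizeof(ip));
	request.return_pointer = (uintptr_t)&ip;	/* kernel copies the result here */
	request.return_size = sizeof(ip);
	request.query = AMDGPU_INFO_HW_IP_INFO;
	request.query_hw_ip.type = AMDGPU_HW_IP_GFX;
	request.query_hw_ip.ip_instance = 0;

	/* Lands in amdgpu_info_ioctl(), which now calls amdgpu_hw_ip_info(). */
	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request))
		return 1;

	printf("GFX %u.%u, rings 0x%x, IB start/size alignment %u/%u\n",
	       ip.hw_ip_version_major, ip.hw_ip_version_minor,
	       ip.available_rings, ip.ib_start_alignment,
	       ip.ib_size_alignment);
	return 0;
}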
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 0c5d59b89849..bdb6362e9556 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -263,6 +263,109 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
 	return 0;
 }
 
+static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+			     struct drm_amdgpu_info *info,
+			     struct drm_amdgpu_info_hw_ip *result)
+{
+	uint32_t ib_start_alignment = 0;
+	uint32_t ib_size_alignment = 0;
+	enum amd_ip_block_type type;
+	uint32_t ring_mask = 0;
+	unsigned int i, j;
+
+	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
+		return -EINVAL;
+
+	switch (info->query_hw_ip.type) {
+	case AMDGPU_HW_IP_GFX:
+		type = AMD_IP_BLOCK_TYPE_GFX;
+		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+			ring_mask |= adev->gfx.gfx_ring[i].ready << i;
+		ib_start_alignment = 32;
+		ib_size_alignment = 32;
+		break;
+	case AMDGPU_HW_IP_COMPUTE:
+		type = AMD_IP_BLOCK_TYPE_GFX;
+		for (i = 0; i < adev->gfx.num_compute_rings; i++)
+			ring_mask |= adev->gfx.compute_ring[i].ready << i;
+		ib_start_alignment = 32;
+		ib_size_alignment = 32;
+		break;
+	case AMDGPU_HW_IP_DMA:
+		type = AMD_IP_BLOCK_TYPE_SDMA;
+		for (i = 0; i < adev->sdma.num_instances; i++)
+			ring_mask |= adev->sdma.instance[i].ring.ready << i;
+		ib_start_alignment = 256;
+		ib_size_alignment = 4;
+		break;
+	case AMDGPU_HW_IP_UVD:
+		type = AMD_IP_BLOCK_TYPE_UVD;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (adev->uvd.harvest_config & (1 << i))
+				continue;
+			ring_mask |= adev->uvd.inst[i].ring.ready;
+		}
+		ib_start_alignment = 64;
+		ib_size_alignment = 64;
+		break;
+	case AMDGPU_HW_IP_VCE:
+		type = AMD_IP_BLOCK_TYPE_VCE;
+		for (i = 0; i < adev->vce.num_rings; i++)
+			ring_mask |= adev->vce.ring[i].ready << i;
+		ib_start_alignment = 4;
+		ib_size_alignment = 1;
+		break;
+	case AMDGPU_HW_IP_UVD_ENC:
+		type = AMD_IP_BLOCK_TYPE_UVD;
+		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+			if (adev->uvd.harvest_config & (1 << i))
+				continue;
+			for (j = 0; j < adev->uvd.num_enc_rings; j++)
+				ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+		}
+		ib_start_alignment = 64;
+		ib_size_alignment = 64;
+		break;
+	case AMDGPU_HW_IP_VCN_DEC:
+		type = AMD_IP_BLOCK_TYPE_VCN;
+		ring_mask = adev->vcn.ring_dec.ready;
+		ib_start_alignment = 16;
+		ib_size_alignment = 16;
+		break;
+	case AMDGPU_HW_IP_VCN_ENC:
+		type = AMD_IP_BLOCK_TYPE_VCN;
+		for (i = 0; i < adev->vcn.num_enc_rings; i++)
+			ring_mask |= adev->vcn.ring_enc[i].ready << i;
+		ib_start_alignment = 64;
+		ib_size_alignment = 1;
+		break;
+	case AMDGPU_HW_IP_VCN_JPEG:
+		type = AMD_IP_BLOCK_TYPE_VCN;
+		ring_mask = adev->vcn.ring_jpeg.ready;
+		ib_start_alignment = 16;
+		ib_size_alignment = 16;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for (i = 0; i < adev->num_ip_blocks; i++)
+		if (adev->ip_blocks[i].version->type == type &&
+		    adev->ip_blocks[i].status.valid)
+			break;
+
+	if (i == adev->num_ip_blocks)
+		return 0;
+
+	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
+	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
+	result->capabilities_flags = 0;
+	result->available_rings = ring_mask;
+	result->ib_start_alignment = ib_start_alignment;
+	result->ib_size_alignment = ib_size_alignment;
+	return 0;
+}
+
 /*
  * Userspace get information ioctl
  */
@@ -288,7 +391,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_crtc *crtc;
 	uint32_t ui32 = 0;
 	uint64_t ui64 = 0;
-	int i, j, found;
+	int i, found;
 	int ui32_size = sizeof(ui32);
 
 	if (!info->return_size || !info->return_pointer)
@@ -318,101 +421,14 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_HW_IP_INFO: {
 		struct drm_amdgpu_info_hw_ip ip = {};
-		enum amd_ip_block_type type;
-		uint32_t ring_mask = 0;
-		uint32_t ib_start_alignment = 0;
-		uint32_t ib_size_alignment = 0;
-
-		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
-			return -EINVAL;
+		int ret;
 
-		switch (info->query_hw_ip.type) {
-		case AMDGPU_HW_IP_GFX:
-			type = AMD_IP_BLOCK_TYPE_GFX;
-			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
-				ring_mask |= adev->gfx.gfx_ring[i].ready << i;
-			ib_start_alignment = 32;
-			ib_size_alignment = 32;
-			break;
-		case AMDGPU_HW_IP_COMPUTE:
-			type = AMD_IP_BLOCK_TYPE_GFX;
-			for (i = 0; i < adev->gfx.num_compute_rings; i++)
-				ring_mask |= adev->gfx.compute_ring[i].ready << i;
-			ib_start_alignment = 32;
-			ib_size_alignment = 32;
-			break;
-		case AMDGPU_HW_IP_DMA:
-			type = AMD_IP_BLOCK_TYPE_SDMA;
-			for (i = 0; i < adev->sdma.num_instances; i++)
-				ring_mask |= adev->sdma.instance[i].ring.ready << i;
-			ib_start_alignment = 256;
-			ib_size_alignment = 4;
-			break;
-		case AMDGPU_HW_IP_UVD:
-			type = AMD_IP_BLOCK_TYPE_UVD;
-			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
-				if (adev->uvd.harvest_config & (1 << i))
-					continue;
-				ring_mask |= adev->uvd.inst[i].ring.ready;
-			}
-			ib_start_alignment = 64;
-			ib_size_alignment = 64;
-			break;
-		case AMDGPU_HW_IP_VCE:
-			type = AMD_IP_BLOCK_TYPE_VCE;
-			for (i = 0; i < adev->vce.num_rings; i++)
-				ring_mask |= adev->vce.ring[i].ready << i;
-			ib_start_alignment = 4;
-			ib_size_alignment = 1;
-			break;
-		case AMDGPU_HW_IP_UVD_ENC:
-			type = AMD_IP_BLOCK_TYPE_UVD;
-			for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
-				if (adev->uvd.harvest_config & (1 << i))
-					continue;
-				for (j = 0; j < adev->uvd.num_enc_rings; j++)
-					ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
-			}
-			ib_start_alignment = 64;
-			ib_size_alignment = 64;
-			break;
-		case AMDGPU_HW_IP_VCN_DEC:
-			type = AMD_IP_BLOCK_TYPE_VCN;
-			ring_mask = adev->vcn.ring_dec.ready;
-			ib_start_alignment = 16;
-			ib_size_alignment = 16;
-			break;
-		case AMDGPU_HW_IP_VCN_ENC:
-			type = AMD_IP_BLOCK_TYPE_VCN;
-			for (i = 0; i < adev->vcn.num_enc_rings; i++)
-				ring_mask |= adev->vcn.ring_enc[i].ready << i;
-			ib_start_alignment = 64;
-			ib_size_alignment = 1;
-			break;
-		case AMDGPU_HW_IP_VCN_JPEG:
-			type = AMD_IP_BLOCK_TYPE_VCN;
-			ring_mask = adev->vcn.ring_jpeg.ready;
-			ib_start_alignment = 16;
-			ib_size_alignment = 16;
-			break;
-		default:
-			return -EINVAL;
-		}
+		ret = amdgpu_hw_ip_info(adev, info, &ip);
+		if (ret)
+			return ret;
 
-		for (i = 0; i < adev->num_ip_blocks; i++) {
-			if (adev->ip_blocks[i].version->type == type &&
-			    adev->ip_blocks[i].status.valid) {
-				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
-				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
-				ip.capabilities_flags = 0;
-				ip.available_rings = ring_mask;
-				ip.ib_start_alignment = ib_start_alignment;
-				ip.ib_size_alignment = ib_size_alignment;
-				break;
-			}
-		}
-		return copy_to_user(out, &ip,
-				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
+		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
+		return ret ? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_HW_IP_COUNT: {
 		enum amd_ip_block_type type;