Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c	62
1 file changed, 56 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index c2c7fb140338..78392671046a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -306,10 +306,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		}
 
 		for (i = 0; i < adev->num_ip_blocks; i++) {
-			if (adev->ip_blocks[i].type == type &&
-			    adev->ip_block_status[i].valid) {
-				ip.hw_ip_version_major = adev->ip_blocks[i].major;
-				ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
+			if (adev->ip_blocks[i].version->type == type &&
+			    adev->ip_blocks[i].status.valid) {
+				ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
+				ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
 				ip.capabilities_flags = 0;
 				ip.available_rings = ring_mask;
 				ip.ib_start_alignment = ib_start_alignment;
@@ -345,8 +345,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		}
 
 		for (i = 0; i < adev->num_ip_blocks; i++)
-			if (adev->ip_blocks[i].type == type &&
-			    adev->ip_block_status[i].valid &&
+			if (adev->ip_blocks[i].version->type == type &&
+			    adev->ip_blocks[i].status.valid &&
 			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
 				count++;
 
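For context, the s/ip_blocks[i].type/ip_blocks[i].version->type/ and s/ip_block_status[i].valid/ip_blocks[i].status.valid/ changes in the two hunks above follow a restructuring of the per-device IP-block bookkeeping. A minimal sketch of the layout these accesses imply is shown below; only the fields actually touched by this diff are real, everything else (and the exact struct contents) is an assumption.

/* Sketch only: the diff reads ip_blocks[i].version->{type,major,minor} and
 * ip_blocks[i].status.valid, suggesting per-instance state bundled with a
 * pointer to a shared, versioned IP-block description. Remaining members of
 * the real kernel structures are omitted here.
 */
struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;	/* e.g. GFX, SDMA, UVD, VCE */
	u32 major;
	u32 minor;
};

struct amdgpu_ip_block_status {
	bool valid;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;		/* per-instance state */
	const struct amdgpu_ip_block_version *version;	/* shared description */
};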
@@ -411,6 +411,36 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return copy_to_user(out, &vram_gtt,
 				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
 	}
+	case AMDGPU_INFO_MEMORY: {
+		struct drm_amdgpu_memory_info mem;
+
+		memset(&mem, 0, sizeof(mem));
+		mem.vram.total_heap_size = adev->mc.real_vram_size;
+		mem.vram.usable_heap_size =
+			adev->mc.real_vram_size - adev->vram_pin_size;
+		mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
+
+		mem.cpu_accessible_vram.total_heap_size =
+			adev->mc.visible_vram_size;
+		mem.cpu_accessible_vram.usable_heap_size =
+			adev->mc.visible_vram_size -
+			(adev->vram_pin_size - adev->invisible_pin_size);
+		mem.cpu_accessible_vram.heap_usage =
+			atomic64_read(&adev->vram_vis_usage);
+		mem.cpu_accessible_vram.max_allocation =
+			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
+
+		mem.gtt.total_heap_size = adev->mc.gtt_size;
+		mem.gtt.usable_heap_size =
+			adev->mc.gtt_size - adev->gart_pin_size;
+		mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
+
+		return copy_to_user(out, &mem,
+				    min((size_t)size, sizeof(mem)))
+				    ? -EFAULT : 0;
+	}
 	case AMDGPU_INFO_READ_MMR_REG: {
 		unsigned n, alloc_size;
 		uint32_t *regs;
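The new AMDGPU_INFO_MEMORY case added above is reached through the existing DRM_IOCTL_AMDGPU_INFO path, with the result copied back into a user-supplied drm_amdgpu_memory_info. A minimal userspace sketch follows; the device node path is only an example, error handling is trimmed, and the header location depends on the local libdrm/UAPI setup.

/* Sketch only: query VRAM/GTT heap sizes via the new AMDGPU_INFO_MEMORY query. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <amdgpu_drm.h>		/* drm_amdgpu_info, drm_amdgpu_memory_info */

int main(void)
{
	struct drm_amdgpu_memory_info mem;
	struct drm_amdgpu_info req;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* example render node */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	memset(&mem, 0, sizeof(mem));
	req.return_pointer = (__u64)(uintptr_t)&mem;	/* kernel copies into this buffer */
	req.return_size = sizeof(mem);
	req.query = AMDGPU_INFO_MEMORY;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req) == 0)
		printf("VRAM: %llu total, %llu usable, %llu in use\n",
		       (unsigned long long)mem.vram.total_heap_size,
		       (unsigned long long)mem.vram.usable_heap_size,
		       (unsigned long long)mem.vram.heap_usage);

	close(fd);
	return 0;
}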
@@ -475,6 +505,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		dev_info.ids_flags = 0;
 		if (adev->flags & AMD_IS_APU)
 			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+		if (amdgpu_sriov_vf(adev))
+			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
 		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
 		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
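The new AMDGPU_IDS_FLAGS_PREEMPTION bit set above for SR-IOV VFs is reported to userspace through the existing AMDGPU_INFO_DEV_INFO query. A hedged sketch of how it might be tested is shown below; the helper name is hypothetical and it assumes the same headers and open file descriptor as the previous example.

/* Sketch only: returns 1 if the driver reports the preemption flag, 0 otherwise. */
static int amdgpu_reports_preemption(int fd)
{
	struct drm_amdgpu_info_device dev_info;
	struct drm_amdgpu_info req;

	memset(&req, 0, sizeof(req));
	memset(&dev_info, 0, sizeof(dev_info));
	req.return_pointer = (__u64)(uintptr_t)&dev_info;
	req.return_size = sizeof(dev_info);
	req.query = AMDGPU_INFO_DEV_INFO;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req) != 0)
		return 0;
	return !!(dev_info.ids_flags & AMDGPU_IDS_FLAGS_PREEMPTION);
}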
@@ -494,6 +526,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return copy_to_user(out, &dev_info,
 				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
 	}
+	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
+		unsigned i;
+		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
+		struct amd_vce_state *vce_state;
+
+		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
+			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
+			if (vce_state) {
+				vce_clk_table.entries[i].sclk = vce_state->sclk;
+				vce_clk_table.entries[i].mclk = vce_state->mclk;
+				vce_clk_table.entries[i].eclk = vce_state->evclk;
+				vce_clk_table.num_valid_entries++;
+			}
+		}
+
+		return copy_to_user(out, &vce_clk_table,
+				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
+	}
 	default:
 		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
 		return -EINVAL;
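The new AMDGPU_INFO_VCE_CLOCK_TABLE case above fills a drm_amdgpu_info_vce_clock_table with up to AMDGPU_VCE_CLOCK_TABLE_ENTRIES valid entries, counting them in num_valid_entries. A minimal userspace sketch using the same ioctl pattern and assumptions as the earlier examples:

/* Sketch only: dump whatever VCE clock states the driver reports. */
static void dump_vce_clock_table(int fd)
{
	struct drm_amdgpu_info_vce_clock_table table;
	struct drm_amdgpu_info req;
	unsigned i;

	memset(&req, 0, sizeof(req));
	memset(&table, 0, sizeof(table));
	req.return_pointer = (__u64)(uintptr_t)&table;
	req.return_size = sizeof(table);
	req.query = AMDGPU_INFO_VCE_CLOCK_TABLE;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req) != 0)
		return;

	for (i = 0; i < table.num_valid_entries; i++)
		printf("VCE state %u: sclk %u, mclk %u, eclk %u\n", i,
		       table.entries[i].sclk,
		       table.entries[i].mclk,
		       table.entries[i].eclk);
}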