diff options
author | Alex Deucher <alexander.deucher@amd.com> | 2017-12-14 15:02:39 -0500 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2017-12-18 10:58:47 -0500 |
commit | 06ec907054c5a48f28ff6856885522e3a7103bb7 (patch) | |
tree | 14ec492959bb4ce62086d9952f20939cc57d5c9e /drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |
parent | 4e89df63c110d7fb4883c8b3d653d3d0e5dac67d (diff) |
drm/amdgpu: use consistent naming for static funcs in amdgpu_device.c
Prefix the static functions with device_, or with device_ip_ for
functions which deal with IP blocks, for naming consistency.
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 176 |
1 file changed, 90 insertions, 86 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a3632c757ca4..92b5064b3391 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -332,7 +332,7 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, | |||
332 | BUG(); | 332 | BUG(); |
333 | } | 333 | } |
334 | 334 | ||
335 | static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) | 335 | static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev) |
336 | { | 336 | { |
337 | return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, | 337 | return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, |
338 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | 338 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, |
@@ -341,7 +341,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) | |||
341 | (void **)&adev->vram_scratch.ptr); | 341 | (void **)&adev->vram_scratch.ptr); |
342 | } | 342 | } |
343 | 343 | ||
344 | static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) | 344 | static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev) |
345 | { | 345 | { |
346 | amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); | 346 | amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); |
347 | } | 347 | } |
@@ -391,14 +391,14 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev) | |||
391 | * GPU doorbell aperture helpers function. | 391 | * GPU doorbell aperture helpers function. |
392 | */ | 392 | */ |
393 | /** | 393 | /** |
394 | * amdgpu_doorbell_init - Init doorbell driver information. | 394 | * amdgpu_device_doorbell_init - Init doorbell driver information. |
395 | * | 395 | * |
396 | * @adev: amdgpu_device pointer | 396 | * @adev: amdgpu_device pointer |
397 | * | 397 | * |
398 | * Init doorbell driver information (CIK) | 398 | * Init doorbell driver information (CIK) |
399 | * Returns 0 on success, error on failure. | 399 | * Returns 0 on success, error on failure. |
400 | */ | 400 | */ |
401 | static int amdgpu_doorbell_init(struct amdgpu_device *adev) | 401 | static int amdgpu_device_doorbell_init(struct amdgpu_device *adev) |
402 | { | 402 | { |
403 | /* No doorbell on SI hardware generation */ | 403 | /* No doorbell on SI hardware generation */ |
404 | if (adev->asic_type < CHIP_BONAIRE) { | 404 | if (adev->asic_type < CHIP_BONAIRE) { |
@@ -431,13 +431,13 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev) | |||
431 | } | 431 | } |
432 | 432 | ||
433 | /** | 433 | /** |
434 | * amdgpu_doorbell_fini - Tear down doorbell driver information. | 434 | * amdgpu_device_doorbell_fini - Tear down doorbell driver information. |
435 | * | 435 | * |
436 | * @adev: amdgpu_device pointer | 436 | * @adev: amdgpu_device pointer |
437 | * | 437 | * |
438 | * Tear down doorbell driver information (CIK) | 438 | * Tear down doorbell driver information (CIK) |
439 | */ | 439 | */ |
440 | static void amdgpu_doorbell_fini(struct amdgpu_device *adev) | 440 | static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev) |
441 | { | 441 | { |
442 | iounmap(adev->doorbell.ptr); | 442 | iounmap(adev->doorbell.ptr); |
443 | adev->doorbell.ptr = NULL; | 443 | adev->doorbell.ptr = NULL; |
@@ -477,20 +477,20 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, | |||
477 | } | 477 | } |
478 | 478 | ||
479 | /* | 479 | /* |
480 | * amdgpu_wb_*() | 480 | * amdgpu_device_wb_*() |
481 | * Writeback is the method by which the GPU updates special pages in memory | 481 | * Writeback is the method by which the GPU updates special pages in memory |
482 | * with the status of certain GPU events (fences, ring pointers,etc.). | 482 | * with the status of certain GPU events (fences, ring pointers,etc.). |
483 | */ | 483 | */ |
484 | 484 | ||
485 | /** | 485 | /** |
486 | * amdgpu_wb_fini - Disable Writeback and free memory | 486 | * amdgpu_device_wb_fini - Disable Writeback and free memory |
487 | * | 487 | * |
488 | * @adev: amdgpu_device pointer | 488 | * @adev: amdgpu_device pointer |
489 | * | 489 | * |
490 | * Disables Writeback and frees the Writeback memory (all asics). | 490 | * Disables Writeback and frees the Writeback memory (all asics). |
491 | * Used at driver shutdown. | 491 | * Used at driver shutdown. |
492 | */ | 492 | */ |
493 | static void amdgpu_wb_fini(struct amdgpu_device *adev) | 493 | static void amdgpu_device_wb_fini(struct amdgpu_device *adev) |
494 | { | 494 | { |
495 | if (adev->wb.wb_obj) { | 495 | if (adev->wb.wb_obj) { |
496 | amdgpu_bo_free_kernel(&adev->wb.wb_obj, | 496 | amdgpu_bo_free_kernel(&adev->wb.wb_obj, |
@@ -501,7 +501,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev) | |||
501 | } | 501 | } |
502 | 502 | ||
503 | /** | 503 | /** |
504 | * amdgpu_wb_init- Init Writeback driver info and allocate memory | 504 | * amdgpu_device_wb_init- Init Writeback driver info and allocate memory |
505 | * | 505 | * |
506 | * @adev: amdgpu_device pointer | 506 | * @adev: amdgpu_device pointer |
507 | * | 507 | * |
@@ -509,7 +509,7 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev) | |||
509 | * Used at driver startup. | 509 | * Used at driver startup. |
510 | * Returns 0 on success or an -error on failure. | 510 | * Returns 0 on success or an -error on failure. |
511 | */ | 511 | */ |
512 | static int amdgpu_wb_init(struct amdgpu_device *adev) | 512 | static int amdgpu_device_wb_init(struct amdgpu_device *adev) |
513 | { | 513 | { |
514 | int r; | 514 | int r; |
515 | 515 | ||
@@ -770,7 +770,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) | |||
770 | cmd & ~PCI_COMMAND_MEMORY); | 770 | cmd & ~PCI_COMMAND_MEMORY); |
771 | 771 | ||
772 | /* Free the VRAM and doorbell BAR, we most likely need to move both. */ | 772 | /* Free the VRAM and doorbell BAR, we most likely need to move both. */ |
773 | amdgpu_doorbell_fini(adev); | 773 | amdgpu_device_doorbell_fini(adev); |
774 | if (adev->asic_type >= CHIP_BONAIRE) | 774 | if (adev->asic_type >= CHIP_BONAIRE) |
775 | pci_release_resource(adev->pdev, 2); | 775 | pci_release_resource(adev->pdev, 2); |
776 | 776 | ||
@@ -787,7 +787,7 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) | |||
787 | /* When the doorbell or fb BAR isn't available we have no chance of | 787 | /* When the doorbell or fb BAR isn't available we have no chance of |
788 | * using the device. | 788 | * using the device. |
789 | */ | 789 | */ |
790 | r = amdgpu_doorbell_init(adev); | 790 | r = amdgpu_device_doorbell_init(adev); |
791 | if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) | 791 | if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET)) |
792 | return -ENODEV; | 792 | return -ENODEV; |
793 | 793 | ||
@@ -900,7 +900,7 @@ void amdgpu_dummy_page_fini(struct amdgpu_device *adev) | |||
900 | 900 | ||
901 | /* if we get transitioned to only one device, take VGA back */ | 901 | /* if we get transitioned to only one device, take VGA back */ |
902 | /** | 902 | /** |
903 | * amdgpu_vga_set_decode - enable/disable vga decode | 903 | * amdgpu_device_vga_set_decode - enable/disable vga decode |
904 | * | 904 | * |
905 | * @cookie: amdgpu_device pointer | 905 | * @cookie: amdgpu_device pointer |
906 | * @state: enable/disable vga decode | 906 | * @state: enable/disable vga decode |
@@ -908,7 +908,7 @@ void amdgpu_dummy_page_fini(struct amdgpu_device *adev) | |||
908 | * Enable/disable vga decode (all asics). | 908 | * Enable/disable vga decode (all asics). |
909 | * Returns VGA resource flags. | 909 | * Returns VGA resource flags. |
910 | */ | 910 | */ |
911 | static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) | 911 | static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state) |
912 | { | 912 | { |
913 | struct amdgpu_device *adev = cookie; | 913 | struct amdgpu_device *adev = cookie; |
914 | amdgpu_asic_set_vga_state(adev, state); | 914 | amdgpu_asic_set_vga_state(adev, state); |
@@ -919,7 +919,7 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state) | |||
919 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 919 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
920 | } | 920 | } |
921 | 921 | ||
922 | static void amdgpu_check_block_size(struct amdgpu_device *adev) | 922 | static void amdgpu_device_check_block_size(struct amdgpu_device *adev) |
923 | { | 923 | { |
924 | /* defines number of bits in page table versus page directory, | 924 | /* defines number of bits in page table versus page directory, |
925 | * a page is 4KB so we have 12 bits offset, minimum 9 bits in the | 925 | * a page is 4KB so we have 12 bits offset, minimum 9 bits in the |
@@ -934,7 +934,7 @@ static void amdgpu_check_block_size(struct amdgpu_device *adev) | |||
934 | } | 934 | } |
935 | } | 935 | } |
936 | 936 | ||
937 | static void amdgpu_check_vm_size(struct amdgpu_device *adev) | 937 | static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) |
938 | { | 938 | { |
939 | /* no need to check the default value */ | 939 | /* no need to check the default value */ |
940 | if (amdgpu_vm_size == -1) | 940 | if (amdgpu_vm_size == -1) |
@@ -948,14 +948,14 @@ static void amdgpu_check_vm_size(struct amdgpu_device *adev) | |||
948 | } | 948 | } |
949 | 949 | ||
950 | /** | 950 | /** |
951 | * amdgpu_check_arguments - validate module params | 951 | * amdgpu_device_check_arguments - validate module params |
952 | * | 952 | * |
953 | * @adev: amdgpu_device pointer | 953 | * @adev: amdgpu_device pointer |
954 | * | 954 | * |
955 | * Validates certain module parameters and updates | 955 | * Validates certain module parameters and updates |
956 | * the associated values used by the driver (all asics). | 956 | * the associated values used by the driver (all asics). |
957 | */ | 957 | */ |
958 | static void amdgpu_check_arguments(struct amdgpu_device *adev) | 958 | static void amdgpu_device_check_arguments(struct amdgpu_device *adev) |
959 | { | 959 | { |
960 | if (amdgpu_sched_jobs < 4) { | 960 | if (amdgpu_sched_jobs < 4) { |
961 | dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", | 961 | dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", |
@@ -988,9 +988,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) | |||
988 | amdgpu_vm_fragment_size = -1; | 988 | amdgpu_vm_fragment_size = -1; |
989 | } | 989 | } |
990 | 990 | ||
991 | amdgpu_check_vm_size(adev); | 991 | amdgpu_device_check_vm_size(adev); |
992 | 992 | ||
993 | amdgpu_check_block_size(adev); | 993 | amdgpu_device_check_block_size(adev); |
994 | 994 | ||
995 | if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || | 995 | if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 || |
996 | !is_power_of_2(amdgpu_vram_page_split))) { | 996 | !is_power_of_2(amdgpu_vram_page_split))) { |
@@ -1359,7 +1359,7 @@ out: | |||
1359 | return err; | 1359 | return err; |
1360 | } | 1360 | } |
1361 | 1361 | ||
1362 | static int amdgpu_early_init(struct amdgpu_device *adev) | 1362 | static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) |
1363 | { | 1363 | { |
1364 | int i, r; | 1364 | int i, r; |
1365 | 1365 | ||
@@ -1468,7 +1468,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev) | |||
1468 | return 0; | 1468 | return 0; |
1469 | } | 1469 | } |
1470 | 1470 | ||
1471 | static int amdgpu_init(struct amdgpu_device *adev) | 1471 | static int amdgpu_device_ip_init(struct amdgpu_device *adev) |
1472 | { | 1472 | { |
1473 | int i, r; | 1473 | int i, r; |
1474 | 1474 | ||
@@ -1484,7 +1484,7 @@ static int amdgpu_init(struct amdgpu_device *adev) | |||
1484 | adev->ip_blocks[i].status.sw = true; | 1484 | adev->ip_blocks[i].status.sw = true; |
1485 | /* need to do gmc hw init early so we can allocate gpu mem */ | 1485 | /* need to do gmc hw init early so we can allocate gpu mem */ |
1486 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { | 1486 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { |
1487 | r = amdgpu_vram_scratch_init(adev); | 1487 | r = amdgpu_device_vram_scratch_init(adev); |
1488 | if (r) { | 1488 | if (r) { |
1489 | DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); | 1489 | DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); |
1490 | return r; | 1490 | return r; |
@@ -1494,9 +1494,9 @@ static int amdgpu_init(struct amdgpu_device *adev) | |||
1494 | DRM_ERROR("hw_init %d failed %d\n", i, r); | 1494 | DRM_ERROR("hw_init %d failed %d\n", i, r); |
1495 | return r; | 1495 | return r; |
1496 | } | 1496 | } |
1497 | r = amdgpu_wb_init(adev); | 1497 | r = amdgpu_device_wb_init(adev); |
1498 | if (r) { | 1498 | if (r) { |
1499 | DRM_ERROR("amdgpu_wb_init failed %d\n", r); | 1499 | DRM_ERROR("amdgpu_device_wb_init failed %d\n", r); |
1500 | return r; | 1500 | return r; |
1501 | } | 1501 | } |
1502 | adev->ip_blocks[i].status.hw = true; | 1502 | adev->ip_blocks[i].status.hw = true; |
@@ -1535,18 +1535,18 @@ static int amdgpu_init(struct amdgpu_device *adev) | |||
1535 | return 0; | 1535 | return 0; |
1536 | } | 1536 | } |
1537 | 1537 | ||
1538 | static void amdgpu_fill_reset_magic(struct amdgpu_device *adev) | 1538 | static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev) |
1539 | { | 1539 | { |
1540 | memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); | 1540 | memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM); |
1541 | } | 1541 | } |
1542 | 1542 | ||
1543 | static bool amdgpu_check_vram_lost(struct amdgpu_device *adev) | 1543 | static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev) |
1544 | { | 1544 | { |
1545 | return !!memcmp(adev->gart.ptr, adev->reset_magic, | 1545 | return !!memcmp(adev->gart.ptr, adev->reset_magic, |
1546 | AMDGPU_RESET_MAGIC_NUM); | 1546 | AMDGPU_RESET_MAGIC_NUM); |
1547 | } | 1547 | } |
1548 | 1548 | ||
1549 | static int amdgpu_late_set_cg_state(struct amdgpu_device *adev) | 1549 | static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) |
1550 | { | 1550 | { |
1551 | int i = 0, r; | 1551 | int i = 0, r; |
1552 | 1552 | ||
@@ -1569,7 +1569,7 @@ static int amdgpu_late_set_cg_state(struct amdgpu_device *adev) | |||
1569 | return 0; | 1569 | return 0; |
1570 | } | 1570 | } |
1571 | 1571 | ||
1572 | static int amdgpu_late_init(struct amdgpu_device *adev) | 1572 | static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) |
1573 | { | 1573 | { |
1574 | int i = 0, r; | 1574 | int i = 0, r; |
1575 | 1575 | ||
@@ -1590,12 +1590,12 @@ static int amdgpu_late_init(struct amdgpu_device *adev) | |||
1590 | mod_delayed_work(system_wq, &adev->late_init_work, | 1590 | mod_delayed_work(system_wq, &adev->late_init_work, |
1591 | msecs_to_jiffies(AMDGPU_RESUME_MS)); | 1591 | msecs_to_jiffies(AMDGPU_RESUME_MS)); |
1592 | 1592 | ||
1593 | amdgpu_fill_reset_magic(adev); | 1593 | amdgpu_device_fill_reset_magic(adev); |
1594 | 1594 | ||
1595 | return 0; | 1595 | return 0; |
1596 | } | 1596 | } |
1597 | 1597 | ||
1598 | static int amdgpu_fini(struct amdgpu_device *adev) | 1598 | static int amdgpu_device_ip_fini(struct amdgpu_device *adev) |
1599 | { | 1599 | { |
1600 | int i, r; | 1600 | int i, r; |
1601 | 1601 | ||
@@ -1629,8 +1629,8 @@ static int amdgpu_fini(struct amdgpu_device *adev) | |||
1629 | continue; | 1629 | continue; |
1630 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { | 1630 | if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { |
1631 | amdgpu_free_static_csa(adev); | 1631 | amdgpu_free_static_csa(adev); |
1632 | amdgpu_wb_fini(adev); | 1632 | amdgpu_device_wb_fini(adev); |
1633 | amdgpu_vram_scratch_fini(adev); | 1633 | amdgpu_device_vram_scratch_fini(adev); |
1634 | } | 1634 | } |
1635 | 1635 | ||
1636 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && | 1636 | if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && |
@@ -1683,11 +1683,11 @@ static int amdgpu_fini(struct amdgpu_device *adev) | |||
1683 | return 0; | 1683 | return 0; |
1684 | } | 1684 | } |
1685 | 1685 | ||
1686 | static void amdgpu_late_init_func_handler(struct work_struct *work) | 1686 | static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work) |
1687 | { | 1687 | { |
1688 | struct amdgpu_device *adev = | 1688 | struct amdgpu_device *adev = |
1689 | container_of(work, struct amdgpu_device, late_init_work.work); | 1689 | container_of(work, struct amdgpu_device, late_init_work.work); |
1690 | amdgpu_late_set_cg_state(adev); | 1690 | amdgpu_device_ip_late_set_cg_state(adev); |
1691 | } | 1691 | } |
1692 | 1692 | ||
1693 | int amdgpu_suspend(struct amdgpu_device *adev) | 1693 | int amdgpu_suspend(struct amdgpu_device *adev) |
@@ -1731,7 +1731,7 @@ int amdgpu_suspend(struct amdgpu_device *adev) | |||
1731 | return 0; | 1731 | return 0; |
1732 | } | 1732 | } |
1733 | 1733 | ||
1734 | static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev) | 1734 | static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) |
1735 | { | 1735 | { |
1736 | int i, r; | 1736 | int i, r; |
1737 | 1737 | ||
@@ -1760,7 +1760,7 @@ static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev) | |||
1760 | return 0; | 1760 | return 0; |
1761 | } | 1761 | } |
1762 | 1762 | ||
1763 | static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) | 1763 | static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev) |
1764 | { | 1764 | { |
1765 | int i, r; | 1765 | int i, r; |
1766 | 1766 | ||
@@ -1793,7 +1793,7 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) | |||
1793 | return 0; | 1793 | return 0; |
1794 | } | 1794 | } |
1795 | 1795 | ||
1796 | static int amdgpu_resume_phase1(struct amdgpu_device *adev) | 1796 | static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev) |
1797 | { | 1797 | { |
1798 | int i, r; | 1798 | int i, r; |
1799 | 1799 | ||
@@ -1816,7 +1816,7 @@ static int amdgpu_resume_phase1(struct amdgpu_device *adev) | |||
1816 | return 0; | 1816 | return 0; |
1817 | } | 1817 | } |
1818 | 1818 | ||
1819 | static int amdgpu_resume_phase2(struct amdgpu_device *adev) | 1819 | static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) |
1820 | { | 1820 | { |
1821 | int i, r; | 1821 | int i, r; |
1822 | 1822 | ||
@@ -1838,14 +1838,14 @@ static int amdgpu_resume_phase2(struct amdgpu_device *adev) | |||
1838 | return 0; | 1838 | return 0; |
1839 | } | 1839 | } |
1840 | 1840 | ||
1841 | static int amdgpu_resume(struct amdgpu_device *adev) | 1841 | static int amdgpu_device_ip_resume(struct amdgpu_device *adev) |
1842 | { | 1842 | { |
1843 | int r; | 1843 | int r; |
1844 | 1844 | ||
1845 | r = amdgpu_resume_phase1(adev); | 1845 | r = amdgpu_device_ip_resume_phase1(adev); |
1846 | if (r) | 1846 | if (r) |
1847 | return r; | 1847 | return r; |
1848 | r = amdgpu_resume_phase2(adev); | 1848 | r = amdgpu_device_ip_resume_phase2(adev); |
1849 | 1849 | ||
1850 | return r; | 1850 | return r; |
1851 | } | 1851 | } |
@@ -1984,7 +1984,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
1984 | hash_init(adev->mn_hash); | 1984 | hash_init(adev->mn_hash); |
1985 | mutex_init(&adev->lock_reset); | 1985 | mutex_init(&adev->lock_reset); |
1986 | 1986 | ||
1987 | amdgpu_check_arguments(adev); | 1987 | amdgpu_device_check_arguments(adev); |
1988 | 1988 | ||
1989 | spin_lock_init(&adev->mmio_idx_lock); | 1989 | spin_lock_init(&adev->mmio_idx_lock); |
1990 | spin_lock_init(&adev->smc_idx_lock); | 1990 | spin_lock_init(&adev->smc_idx_lock); |
@@ -2002,7 +2002,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
2002 | INIT_LIST_HEAD(&adev->ring_lru_list); | 2002 | INIT_LIST_HEAD(&adev->ring_lru_list); |
2003 | spin_lock_init(&adev->ring_lru_list_lock); | 2003 | spin_lock_init(&adev->ring_lru_list_lock); |
2004 | 2004 | ||
2005 | INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler); | 2005 | INIT_DELAYED_WORK(&adev->late_init_work, |
2006 | amdgpu_device_ip_late_init_func_handler); | ||
2006 | 2007 | ||
2007 | /* Registers mapping */ | 2008 | /* Registers mapping */ |
2008 | /* TODO: block userspace mapping of io register */ | 2009 | /* TODO: block userspace mapping of io register */ |
@@ -2022,7 +2023,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
2022 | DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); | 2023 | DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); |
2023 | 2024 | ||
2024 | /* doorbell bar mapping */ | 2025 | /* doorbell bar mapping */ |
2025 | amdgpu_doorbell_init(adev); | 2026 | amdgpu_device_doorbell_init(adev); |
2026 | 2027 | ||
2027 | /* io port mapping */ | 2028 | /* io port mapping */ |
2028 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 2029 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
@@ -2036,14 +2037,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
2036 | DRM_INFO("PCI I/O BAR is not found.\n"); | 2037 | DRM_INFO("PCI I/O BAR is not found.\n"); |
2037 | 2038 | ||
2038 | /* early init functions */ | 2039 | /* early init functions */ |
2039 | r = amdgpu_early_init(adev); | 2040 | r = amdgpu_device_ip_early_init(adev); |
2040 | if (r) | 2041 | if (r) |
2041 | return r; | 2042 | return r; |
2042 | 2043 | ||
2043 | /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ | 2044 | /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */ |
2044 | /* this will fail for cards that aren't VGA class devices, just | 2045 | /* this will fail for cards that aren't VGA class devices, just |
2045 | * ignore it */ | 2046 | * ignore it */ |
2046 | vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode); | 2047 | vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode); |
2047 | 2048 | ||
2048 | if (amdgpu_runtime_pm == 1) | 2049 | if (amdgpu_runtime_pm == 1) |
2049 | runtime = true; | 2050 | runtime = true; |
@@ -2118,7 +2119,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
2118 | /* init the mode config */ | 2119 | /* init the mode config */ |
2119 | drm_mode_config_init(adev->ddev); | 2120 | drm_mode_config_init(adev->ddev); |
2120 | 2121 | ||
2121 | r = amdgpu_init(adev); | 2122 | r = amdgpu_device_ip_init(adev); |
2122 | if (r) { | 2123 | if (r) { |
2123 | /* failed in exclusive mode due to timeout */ | 2124 | /* failed in exclusive mode due to timeout */ |
2124 | if (amdgpu_sriov_vf(adev) && | 2125 | if (amdgpu_sriov_vf(adev) && |
@@ -2132,9 +2133,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
2132 | r = -EAGAIN; | 2133 | r = -EAGAIN; |
2133 | goto failed; | 2134 | goto failed; |
2134 | } | 2135 | } |
2135 | dev_err(adev->dev, "amdgpu_init failed\n"); | 2136 | dev_err(adev->dev, "amdgpu_device_ip_init failed\n"); |
2136 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); | 2137 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); |
2137 | amdgpu_fini(adev); | 2138 | amdgpu_device_ip_fini(adev); |
2138 | goto failed; | 2139 | goto failed; |
2139 | } | 2140 | } |
2140 | 2141 | ||
@@ -2202,9 +2203,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
2202 | /* enable clockgating, etc. after ib tests, etc. since some blocks require | 2203 | /* enable clockgating, etc. after ib tests, etc. since some blocks require |
2203 | * explicit gating rather than handling it automatically. | 2204 | * explicit gating rather than handling it automatically. |
2204 | */ | 2205 | */ |
2205 | r = amdgpu_late_init(adev); | 2206 | r = amdgpu_device_ip_late_init(adev); |
2206 | if (r) { | 2207 | if (r) { |
2207 | dev_err(adev->dev, "amdgpu_late_init failed\n"); | 2208 | dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n"); |
2208 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); | 2209 | amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r); |
2209 | goto failed; | 2210 | goto failed; |
2210 | } | 2211 | } |
@@ -2239,7 +2240,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) | |||
2239 | amdgpu_ib_pool_fini(adev); | 2240 | amdgpu_ib_pool_fini(adev); |
2240 | amdgpu_fence_driver_fini(adev); | 2241 | amdgpu_fence_driver_fini(adev); |
2241 | amdgpu_fbdev_fini(adev); | 2242 | amdgpu_fbdev_fini(adev); |
2242 | r = amdgpu_fini(adev); | 2243 | r = amdgpu_device_ip_fini(adev); |
2243 | if (adev->firmware.gpu_info_fw) { | 2244 | if (adev->firmware.gpu_info_fw) { |
2244 | release_firmware(adev->firmware.gpu_info_fw); | 2245 | release_firmware(adev->firmware.gpu_info_fw); |
2245 | adev->firmware.gpu_info_fw = NULL; | 2246 | adev->firmware.gpu_info_fw = NULL; |
@@ -2262,7 +2263,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) | |||
2262 | adev->rio_mem = NULL; | 2263 | adev->rio_mem = NULL; |
2263 | iounmap(adev->rmmio); | 2264 | iounmap(adev->rmmio); |
2264 | adev->rmmio = NULL; | 2265 | adev->rmmio = NULL; |
2265 | amdgpu_doorbell_fini(adev); | 2266 | amdgpu_device_doorbell_fini(adev); |
2266 | amdgpu_pm_sysfs_fini(adev); | 2267 | amdgpu_pm_sysfs_fini(adev); |
2267 | amdgpu_debugfs_regs_cleanup(adev); | 2268 | amdgpu_debugfs_regs_cleanup(adev); |
2268 | } | 2269 | } |
@@ -2407,9 +2408,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) | |||
2407 | DRM_ERROR("amdgpu asic init failed\n"); | 2408 | DRM_ERROR("amdgpu asic init failed\n"); |
2408 | } | 2409 | } |
2409 | 2410 | ||
2410 | r = amdgpu_resume(adev); | 2411 | r = amdgpu_device_ip_resume(adev); |
2411 | if (r) { | 2412 | if (r) { |
2412 | DRM_ERROR("amdgpu_resume failed (%d).\n", r); | 2413 | DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r); |
2413 | goto unlock; | 2414 | goto unlock; |
2414 | } | 2415 | } |
2415 | amdgpu_fence_driver_resume(adev); | 2416 | amdgpu_fence_driver_resume(adev); |
@@ -2420,7 +2421,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) | |||
2420 | DRM_ERROR("ib ring test failed (%d).\n", r); | 2421 | DRM_ERROR("ib ring test failed (%d).\n", r); |
2421 | } | 2422 | } |
2422 | 2423 | ||
2423 | r = amdgpu_late_init(adev); | 2424 | r = amdgpu_device_ip_late_init(adev); |
2424 | if (r) | 2425 | if (r) |
2425 | goto unlock; | 2426 | goto unlock; |
2426 | 2427 | ||
@@ -2500,7 +2501,7 @@ unlock: | |||
2500 | return r; | 2501 | return r; |
2501 | } | 2502 | } |
2502 | 2503 | ||
2503 | static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) | 2504 | static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) |
2504 | { | 2505 | { |
2505 | int i; | 2506 | int i; |
2506 | bool asic_hang = false; | 2507 | bool asic_hang = false; |
@@ -2522,7 +2523,7 @@ static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) | |||
2522 | return asic_hang; | 2523 | return asic_hang; |
2523 | } | 2524 | } |
2524 | 2525 | ||
2525 | static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) | 2526 | static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev) |
2526 | { | 2527 | { |
2527 | int i, r = 0; | 2528 | int i, r = 0; |
2528 | 2529 | ||
@@ -2540,7 +2541,7 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) | |||
2540 | return 0; | 2541 | return 0; |
2541 | } | 2542 | } |
2542 | 2543 | ||
2543 | static bool amdgpu_need_full_reset(struct amdgpu_device *adev) | 2544 | static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) |
2544 | { | 2545 | { |
2545 | int i; | 2546 | int i; |
2546 | 2547 | ||
@@ -2561,7 +2562,7 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev) | |||
2561 | return false; | 2562 | return false; |
2562 | } | 2563 | } |
2563 | 2564 | ||
2564 | static int amdgpu_soft_reset(struct amdgpu_device *adev) | 2565 | static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev) |
2565 | { | 2566 | { |
2566 | int i, r = 0; | 2567 | int i, r = 0; |
2567 | 2568 | ||
@@ -2579,7 +2580,7 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev) | |||
2579 | return 0; | 2580 | return 0; |
2580 | } | 2581 | } |
2581 | 2582 | ||
2582 | static int amdgpu_post_soft_reset(struct amdgpu_device *adev) | 2583 | static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev) |
2583 | { | 2584 | { |
2584 | int i, r = 0; | 2585 | int i, r = 0; |
2585 | 2586 | ||
@@ -2604,10 +2605,10 @@ bool amdgpu_need_backup(struct amdgpu_device *adev) | |||
2604 | return amdgpu_gpu_recovery; | 2605 | return amdgpu_gpu_recovery; |
2605 | } | 2606 | } |
2606 | 2607 | ||
2607 | static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, | 2608 | static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev, |
2608 | struct amdgpu_ring *ring, | 2609 | struct amdgpu_ring *ring, |
2609 | struct amdgpu_bo *bo, | 2610 | struct amdgpu_bo *bo, |
2610 | struct dma_fence **fence) | 2611 | struct dma_fence **fence) |
2611 | { | 2612 | { |
2612 | uint32_t domain; | 2613 | uint32_t domain; |
2613 | int r; | 2614 | int r; |
@@ -2640,7 +2641,7 @@ err: | |||
2640 | } | 2641 | } |
2641 | 2642 | ||
2642 | /* | 2643 | /* |
2643 | * amdgpu_reset - reset ASIC/GPU for bare-metal or passthrough | 2644 | * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough |
2644 | * | 2645 | * |
2645 | * @adev: amdgpu device pointer | 2646 | * @adev: amdgpu device pointer |
2646 | * @reset_flags: output param tells caller the reset result | 2647 | * @reset_flags: output param tells caller the reset result |
@@ -2648,18 +2649,19 @@ err: | |||
2648 | * attempt to do soft-reset or full-reset and reinitialize Asic | 2649 | * attempt to do soft-reset or full-reset and reinitialize Asic |
2649 | * return 0 means successed otherwise failed | 2650 | * return 0 means successed otherwise failed |
2650 | */ | 2651 | */ |
2651 | static int amdgpu_reset(struct amdgpu_device *adev, uint64_t* reset_flags) | 2652 | static int amdgpu_device_reset(struct amdgpu_device *adev, |
2653 | uint64_t* reset_flags) | ||
2652 | { | 2654 | { |
2653 | bool need_full_reset, vram_lost = 0; | 2655 | bool need_full_reset, vram_lost = 0; |
2654 | int r; | 2656 | int r; |
2655 | 2657 | ||
2656 | need_full_reset = amdgpu_need_full_reset(adev); | 2658 | need_full_reset = amdgpu_device_ip_need_full_reset(adev); |
2657 | 2659 | ||
2658 | if (!need_full_reset) { | 2660 | if (!need_full_reset) { |
2659 | amdgpu_pre_soft_reset(adev); | 2661 | amdgpu_device_ip_pre_soft_reset(adev); |
2660 | r = amdgpu_soft_reset(adev); | 2662 | r = amdgpu_device_ip_soft_reset(adev); |
2661 | amdgpu_post_soft_reset(adev); | 2663 | amdgpu_device_ip_post_soft_reset(adev); |
2662 | if (r || amdgpu_check_soft_reset(adev)) { | 2664 | if (r || amdgpu_device_ip_check_soft_reset(adev)) { |
2663 | DRM_INFO("soft reset failed, will fallback to full reset!\n"); | 2665 | DRM_INFO("soft reset failed, will fallback to full reset!\n"); |
2664 | need_full_reset = true; | 2666 | need_full_reset = true; |
2665 | } | 2667 | } |
@@ -2676,11 +2678,11 @@ retry: | |||
2676 | 2678 | ||
2677 | if (!r) { | 2679 | if (!r) { |
2678 | dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); | 2680 | dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); |
2679 | r = amdgpu_resume_phase1(adev); | 2681 | r = amdgpu_device_ip_resume_phase1(adev); |
2680 | if (r) | 2682 | if (r) |
2681 | goto out; | 2683 | goto out; |
2682 | 2684 | ||
2683 | vram_lost = amdgpu_check_vram_lost(adev); | 2685 | vram_lost = amdgpu_device_check_vram_lost(adev); |
2684 | if (vram_lost) { | 2686 | if (vram_lost) { |
2685 | DRM_ERROR("VRAM is lost!\n"); | 2687 | DRM_ERROR("VRAM is lost!\n"); |
2686 | atomic_inc(&adev->vram_lost_counter); | 2688 | atomic_inc(&adev->vram_lost_counter); |
@@ -2691,12 +2693,12 @@ retry: | |||
2691 | if (r) | 2693 | if (r) |
2692 | goto out; | 2694 | goto out; |
2693 | 2695 | ||
2694 | r = amdgpu_resume_phase2(adev); | 2696 | r = amdgpu_device_ip_resume_phase2(adev); |
2695 | if (r) | 2697 | if (r) |
2696 | goto out; | 2698 | goto out; |
2697 | 2699 | ||
2698 | if (vram_lost) | 2700 | if (vram_lost) |
2699 | amdgpu_fill_reset_magic(adev); | 2701 | amdgpu_device_fill_reset_magic(adev); |
2700 | } | 2702 | } |
2701 | } | 2703 | } |
2702 | 2704 | ||
@@ -2724,7 +2726,7 @@ out: | |||
2724 | } | 2726 | } |
2725 | 2727 | ||
2726 | /* | 2728 | /* |
2727 | * amdgpu_reset_sriov - reset ASIC for SR-IOV vf | 2729 | * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf |
2728 | * | 2730 | * |
2729 | * @adev: amdgpu device pointer | 2731 | * @adev: amdgpu device pointer |
2730 | * @reset_flags: output param tells caller the reset result | 2732 | * @reset_flags: output param tells caller the reset result |
@@ -2732,7 +2734,9 @@ out: | |||
2732 | * do VF FLR and reinitialize Asic | 2734 | * do VF FLR and reinitialize Asic |
2733 | * return 0 means successed otherwise failed | 2735 | * return 0 means successed otherwise failed |
2734 | */ | 2736 | */ |
2735 | static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, bool from_hypervisor) | 2737 | static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, |
2738 | uint64_t *reset_flags, | ||
2739 | bool from_hypervisor) | ||
2736 | { | 2740 | { |
2737 | int r; | 2741 | int r; |
2738 | 2742 | ||
@@ -2744,7 +2748,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, | |||
2744 | return r; | 2748 | return r; |
2745 | 2749 | ||
2746 | /* Resume IP prior to SMC */ | 2750 | /* Resume IP prior to SMC */ |
2747 | r = amdgpu_sriov_reinit_early(adev); | 2751 | r = amdgpu_device_ip_reinit_early_sriov(adev); |
2748 | if (r) | 2752 | if (r) |
2749 | goto error; | 2753 | goto error; |
2750 | 2754 | ||
@@ -2752,7 +2756,7 @@ static int amdgpu_reset_sriov(struct amdgpu_device *adev, uint64_t *reset_flags, | |||
2752 | amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); | 2756 | amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); |
2753 | 2757 | ||
2754 | /* now we are okay to resume SMC/CP/SDMA */ | 2758 | /* now we are okay to resume SMC/CP/SDMA */ |
2755 | r = amdgpu_sriov_reinit_late(adev); | 2759 | r = amdgpu_device_ip_reinit_late_sriov(adev); |
2756 | if (r) | 2760 | if (r) |
2757 | goto error; | 2761 | goto error; |
2758 | 2762 | ||
@@ -2794,7 +2798,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool | |||
2794 | uint64_t reset_flags = 0; | 2798 | uint64_t reset_flags = 0; |
2795 | int i, r, resched; | 2799 | int i, r, resched; |
2796 | 2800 | ||
2797 | if (!amdgpu_check_soft_reset(adev)) { | 2801 | if (!amdgpu_device_ip_check_soft_reset(adev)) { |
2798 | DRM_INFO("No hardware hang detected. Did some blocks stall?\n"); | 2802 | DRM_INFO("No hardware hang detected. Did some blocks stall?\n"); |
2799 | return 0; | 2803 | return 0; |
2800 | } | 2804 | } |
@@ -2836,9 +2840,9 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool | |||
2836 | } | 2840 | } |
2837 | 2841 | ||
2838 | if (amdgpu_sriov_vf(adev)) | 2842 | if (amdgpu_sriov_vf(adev)) |
2839 | r = amdgpu_reset_sriov(adev, &reset_flags, job ? false : true); | 2843 | r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true); |
2840 | else | 2844 | else |
2841 | r = amdgpu_reset(adev, &reset_flags); | 2845 | r = amdgpu_device_reset(adev, &reset_flags); |
2842 | 2846 | ||
2843 | if (!r) { | 2847 | if (!r) { |
2844 | if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) || | 2848 | if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) || |
@@ -2851,7 +2855,7 @@ int amdgpu_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool | |||
2851 | mutex_lock(&adev->shadow_list_lock); | 2855 | mutex_lock(&adev->shadow_list_lock); |
2852 | list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { | 2856 | list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) { |
2853 | next = NULL; | 2857 | next = NULL; |
2854 | amdgpu_recover_vram_from_shadow(adev, ring, bo, &next); | 2858 | amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next); |
2855 | if (fence) { | 2859 | if (fence) { |
2856 | r = dma_fence_wait(fence, false); | 2860 | r = dma_fence_wait(fence, false); |
2857 | if (r) { | 2861 | if (r) { |