author     Dave Airlie <airlied@redhat.com>   2017-08-17 15:30:53 -0400
committer  Dave Airlie <airlied@redhat.com>   2017-08-17 15:30:53 -0400
commit     2040c47361646d18b9832fd930d2a025da002a57 (patch)
tree       8584015447c6863a95637b0b0a3f4af1170ffc19
parent     3154b133711f70bb50f513773947a8a647d24310 (diff)
parent     37899a5254917e17418bbb23086d55e38faaa659 (diff)
Merge branch 'drm-next-4.14' of git://people.freedesktop.org/~agd5f/linux into drm-next
More features for 4.14. Nothing too major here. I have a few additional
patches for large page support in vega10 among other things, but they require
some reservation object patches from drm-misc-next, so I'll send that request
once you've pulled the latest drm-misc-next. Highlights:
- Fixes for ACP audio on stoney
- SR-IOV fixes for vega10
- various powerplay fixes
- lots of code clean up
* 'drm-next-4.14' of git://people.freedesktop.org/~agd5f/linux: (62 commits)
drm/amdgpu/gfx7: fix function name
drm/amd/amdgpu: Disabling Power Gating for Stoney platform
drm/amd/amdgpu: Added a quirk for Stoney platform
drm/amdgpu: jt_size was wrongly counted twice
drm/amdgpu: fix missing endian-safe guard
drm/amdgpu: ignore digest_size when loading sdma fw for raven
drm/amdgpu: Uninitialized variable in amdgpu_ttm_backend_bind()
drm/amd/powerplay: fix coding style in hwmgr.c
drm/amd/powerplay: refine dmesg info under powerplay.
drm/amdgpu: don't finish the ring if not initialized
drm/radeon: Fix preferred typo
drm/amdgpu: Fix preferred typo
drm/radeon: Fix stolen typo
drm/amdgpu: Fix stolen typo
drm/amd/powerplay: fix coccinelle warnings in vega10_hwmgr.c
drm/amdgpu: set gfx_v9_0_ip_funcs as static
drm/radeon: switch to drm_*{get,put} helpers
drm/amdgpu: switch to drm_*{get,put} helpers
drm/amd/powerplay: add CZ profile support
drm/amd/powerplay: fix PSI not enabled by kmd
...
68 files changed, 788 insertions(+), 1133 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 51d1364cf185..a5427cf4b19d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -373,78 +373,10 @@ struct amdgpu_clock { | |||
373 | }; | 373 | }; |
374 | 374 | ||
375 | /* | 375 | /* |
376 | * BO. | 376 | * GEM. |
377 | */ | 377 | */ |
378 | struct amdgpu_bo_list_entry { | ||
379 | struct amdgpu_bo *robj; | ||
380 | struct ttm_validate_buffer tv; | ||
381 | struct amdgpu_bo_va *bo_va; | ||
382 | uint32_t priority; | ||
383 | struct page **user_pages; | ||
384 | int user_invalidated; | ||
385 | }; | ||
386 | |||
387 | struct amdgpu_bo_va_mapping { | ||
388 | struct list_head list; | ||
389 | struct rb_node rb; | ||
390 | uint64_t start; | ||
391 | uint64_t last; | ||
392 | uint64_t __subtree_last; | ||
393 | uint64_t offset; | ||
394 | uint64_t flags; | ||
395 | }; | ||
396 | |||
397 | /* bo virtual addresses in a specific vm */ | ||
398 | struct amdgpu_bo_va { | ||
399 | /* protected by bo being reserved */ | ||
400 | struct list_head bo_list; | ||
401 | struct dma_fence *last_pt_update; | ||
402 | unsigned ref_count; | ||
403 | |||
404 | /* protected by vm mutex and spinlock */ | ||
405 | struct list_head vm_status; | ||
406 | |||
407 | /* mappings for this bo_va */ | ||
408 | struct list_head invalids; | ||
409 | struct list_head valids; | ||
410 | |||
411 | /* constant after initialization */ | ||
412 | struct amdgpu_vm *vm; | ||
413 | struct amdgpu_bo *bo; | ||
414 | }; | ||
415 | 378 | ||
416 | #define AMDGPU_GEM_DOMAIN_MAX 0x3 | 379 | #define AMDGPU_GEM_DOMAIN_MAX 0x3 |
417 | |||
418 | struct amdgpu_bo { | ||
419 | /* Protected by tbo.reserved */ | ||
420 | u32 prefered_domains; | ||
421 | u32 allowed_domains; | ||
422 | struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; | ||
423 | struct ttm_placement placement; | ||
424 | struct ttm_buffer_object tbo; | ||
425 | struct ttm_bo_kmap_obj kmap; | ||
426 | u64 flags; | ||
427 | unsigned pin_count; | ||
428 | void *kptr; | ||
429 | u64 tiling_flags; | ||
430 | u64 metadata_flags; | ||
431 | void *metadata; | ||
432 | u32 metadata_size; | ||
433 | unsigned prime_shared_count; | ||
434 | /* list of all virtual address to which this bo | ||
435 | * is associated to | ||
436 | */ | ||
437 | struct list_head va; | ||
438 | /* Constant after initialization */ | ||
439 | struct drm_gem_object gem_base; | ||
440 | struct amdgpu_bo *parent; | ||
441 | struct amdgpu_bo *shadow; | ||
442 | |||
443 | struct ttm_bo_kmap_obj dma_buf_vmap; | ||
444 | struct amdgpu_mn *mn; | ||
445 | struct list_head mn_list; | ||
446 | struct list_head shadow_list; | ||
447 | }; | ||
448 | #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) | 380 | #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base) |
449 | 381 | ||
450 | void amdgpu_gem_object_free(struct drm_gem_object *obj); | 382 | void amdgpu_gem_object_free(struct drm_gem_object *obj); |
@@ -678,15 +610,15 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT | |||
678 | /* overlap the doorbell assignment with VCN as they are mutually exclusive | 610 | /* overlap the doorbell assignment with VCN as they are mutually exclusive |
679 | * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD | 611 | * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD |
680 | */ | 612 | */ |
681 | AMDGPU_DOORBELL64_RING0_1 = 0xF8, | 613 | AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8, |
682 | AMDGPU_DOORBELL64_RING2_3 = 0xF9, | 614 | AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9, |
683 | AMDGPU_DOORBELL64_RING4_5 = 0xFA, | 615 | AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA, |
684 | AMDGPU_DOORBELL64_RING6_7 = 0xFB, | 616 | AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB, |
685 | 617 | ||
686 | AMDGPU_DOORBELL64_UVD_RING0_1 = 0xFC, | 618 | AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC, |
687 | AMDGPU_DOORBELL64_UVD_RING2_3 = 0xFD, | 619 | AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD, |
688 | AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFE, | 620 | AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE, |
689 | AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFF, | 621 | AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF, |
690 | 622 | ||
691 | AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, | 623 | AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF, |
692 | AMDGPU_DOORBELL64_INVALID = 0xFFFF | 624 | AMDGPU_DOORBELL64_INVALID = 0xFFFF |
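The hunk above fixes a copy-and-paste naming bug in the doorbell map: slots 0xF8-0xFB belong to UVD and 0xFC-0xFF to VCE. As the comment notes, VCE doorbells are 32 bits wide and two rings share one QWORD slot, so a ring index maps to its slot by halving. A minimal sketch of that mapping (hypothetical helper, not part of the patch):

    /* Hypothetical: two 32-bit VCE doorbells per 64-bit slot, so
     * VCE ring n lives in slot AMDGPU_DOORBELL64_VCE_RING0_1 + n / 2. */
    static u32 vce_doorbell_slot(u32 ring)
    {
            return AMDGPU_DOORBELL64_VCE_RING0_1 + ring / 2;
    }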
@@ -825,6 +757,14 @@ struct amdgpu_fpriv { | |||
825 | /* | 757 | /* |
826 | * residency list | 758 | * residency list |
827 | */ | 759 | */ |
760 | struct amdgpu_bo_list_entry { | ||
761 | struct amdgpu_bo *robj; | ||
762 | struct ttm_validate_buffer tv; | ||
763 | struct amdgpu_bo_va *bo_va; | ||
764 | uint32_t priority; | ||
765 | struct page **user_pages; | ||
766 | int user_invalidated; | ||
767 | }; | ||
828 | 768 | ||
829 | struct amdgpu_bo_list { | 769 | struct amdgpu_bo_list { |
830 | struct mutex lock; | 770 | struct mutex lock; |
@@ -1191,10 +1131,6 @@ struct amdgpu_wb { | |||
1191 | 1131 | ||
1192 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); | 1132 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); |
1193 | void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); | 1133 | void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); |
1194 | int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb); | ||
1195 | int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb); | ||
1196 | void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb); | ||
1197 | void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb); | ||
1198 | 1134 | ||
1199 | void amdgpu_get_pcie_info(struct amdgpu_device *adev); | 1135 | void amdgpu_get_pcie_info(struct amdgpu_device *adev); |
1200 | 1136 | ||
@@ -1488,7 +1424,7 @@ struct amdgpu_device { | |||
1488 | bool is_atom_fw; | 1424 | bool is_atom_fw; |
1489 | uint8_t *bios; | 1425 | uint8_t *bios; |
1490 | uint32_t bios_size; | 1426 | uint32_t bios_size; |
1491 | struct amdgpu_bo *stollen_vga_memory; | 1427 | struct amdgpu_bo *stolen_vga_memory; |
1492 | uint32_t bios_scratch_reg_offset; | 1428 | uint32_t bios_scratch_reg_offset; |
1493 | uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; | 1429 | uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; |
1494 | 1430 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 06879d1dcabd..a52795d9b458 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -285,19 +285,20 @@ static int acp_hw_init(void *handle) | |||
285 | return 0; | 285 | return 0; |
286 | else if (r) | 286 | else if (r) |
287 | return r; | 287 | return r; |
288 | if (adev->asic_type != CHIP_STONEY) { | ||
289 | adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); | ||
290 | if (adev->acp.acp_genpd == NULL) | ||
291 | return -ENOMEM; | ||
288 | 292 | ||
289 | adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); | 293 | adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; |
290 | if (adev->acp.acp_genpd == NULL) | 294 | adev->acp.acp_genpd->gpd.power_off = acp_poweroff; |
291 | return -ENOMEM; | 295 | adev->acp.acp_genpd->gpd.power_on = acp_poweron; |
292 | |||
293 | adev->acp.acp_genpd->gpd.name = "ACP_AUDIO"; | ||
294 | adev->acp.acp_genpd->gpd.power_off = acp_poweroff; | ||
295 | adev->acp.acp_genpd->gpd.power_on = acp_poweron; | ||
296 | 296 | ||
297 | 297 | ||
298 | adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; | 298 | adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device; |
299 | 299 | ||
300 | pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); | 300 | pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false); |
301 | } | ||
301 | 302 | ||
302 | adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS, | 303 | adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS, |
303 | GFP_KERNEL); | 304 | GFP_KERNEL); |
@@ -319,14 +320,29 @@ static int acp_hw_init(void *handle) | |||
319 | return -ENOMEM; | 320 | return -ENOMEM; |
320 | } | 321 | } |
321 | 322 | ||
322 | i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; | 323 | switch (adev->asic_type) { |
324 | case CHIP_STONEY: | ||
325 | i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | | ||
326 | DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; | ||
327 | break; | ||
328 | default: | ||
329 | i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET; | ||
330 | } | ||
323 | i2s_pdata[0].cap = DWC_I2S_PLAY; | 331 | i2s_pdata[0].cap = DWC_I2S_PLAY; |
324 | i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000; | 332 | i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000; |
325 | i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET; | 333 | i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET; |
326 | i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET; | 334 | i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET; |
335 | switch (adev->asic_type) { | ||
336 | case CHIP_STONEY: | ||
337 | i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | | ||
338 | DW_I2S_QUIRK_COMP_PARAM1 | | ||
339 | DW_I2S_QUIRK_16BIT_IDX_OVERRIDE; | ||
340 | break; | ||
341 | default: | ||
342 | i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | | ||
343 | DW_I2S_QUIRK_COMP_PARAM1; | ||
344 | } | ||
327 | 345 | ||
328 | i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET | | ||
329 | DW_I2S_QUIRK_COMP_PARAM1; | ||
330 | i2s_pdata[1].cap = DWC_I2S_RECORD; | 346 | i2s_pdata[1].cap = DWC_I2S_RECORD; |
331 | i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000; | 347 | i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000; |
332 | i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; | 348 | i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; |
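Both switch statements encode the same platform quirk: Stoney's I2S controller gets DW_I2S_QUIRK_16BIT_IDX_OVERRIDE on top of the quirks every ASIC needs, telling the DesignWare driver to override the sample-width index to 16 bit. A condensed sketch of the flag composition (the patch keeps the two explicit switches instead):

    /* Sketch: base quirks plus the Stoney-only 16-bit index override. */
    unsigned int quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;

    if (adev->asic_type == CHIP_STONEY)
            quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
    i2s_pdata[0].quirks = quirks;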
@@ -373,12 +389,14 @@ static int acp_hw_init(void *handle) | |||
373 | if (r) | 389 | if (r) |
374 | return r; | 390 | return r; |
375 | 391 | ||
376 | for (i = 0; i < ACP_DEVS ; i++) { | 392 | if (adev->asic_type != CHIP_STONEY) { |
377 | dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); | 393 | for (i = 0; i < ACP_DEVS ; i++) { |
378 | r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); | 394 | dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); |
379 | if (r) { | 395 | r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); |
380 | dev_err(dev, "Failed to add dev to genpd\n"); | 396 | if (r) { |
381 | return r; | 397 | dev_err(dev, "Failed to add dev to genpd\n"); |
398 | return r; | ||
399 | } | ||
382 | } | 400 | } |
383 | } | 401 | } |
384 | 402 | ||
@@ -398,20 +416,22 @@ static int acp_hw_fini(void *handle) | |||
398 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 416 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
399 | 417 | ||
400 | /* return early if no ACP */ | 418 | /* return early if no ACP */ |
401 | if (!adev->acp.acp_genpd) | 419 | if (!adev->acp.acp_cell) |
402 | return 0; | 420 | return 0; |
403 | 421 | ||
404 | for (i = 0; i < ACP_DEVS ; i++) { | 422 | if (adev->acp.acp_genpd) { |
405 | dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); | 423 | for (i = 0; i < ACP_DEVS ; i++) { |
406 | ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); | 424 | dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); |
407 | /* If removal fails, dont giveup and try rest */ | 425 | ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev); |
408 | if (ret) | 426 | /* If removal fails, dont giveup and try rest */ |
409 | dev_err(dev, "remove dev from genpd failed\n"); | 427 | if (ret) |
428 | dev_err(dev, "remove dev from genpd failed\n"); | ||
429 | } | ||
430 | kfree(adev->acp.acp_genpd); | ||
410 | } | 431 | } |
411 | 432 | ||
412 | mfd_remove_devices(adev->acp.parent); | 433 | mfd_remove_devices(adev->acp.parent); |
413 | kfree(adev->acp.acp_res); | 434 | kfree(adev->acp.acp_res); |
414 | kfree(adev->acp.acp_genpd); | ||
415 | kfree(adev->acp.acp_cell); | 435 | kfree(adev->acp.acp_cell); |
416 | 436 | ||
417 | return 0; | 437 | return 0; |
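Because the power domain is now only allocated on non-Stoney parts, acp_hw_fini() can no longer use acp_genpd as its "is ACP present" test; it keys the early return on acp_cell and frees the genpd under the same NULL check that guards its use. Roughly:

    /* Sketch of the reworked teardown order: */
    if (!adev->acp.acp_cell)          /* no ACP on this device */
            return 0;
    if (adev->acp.acp_genpd) {        /* only allocated on non-Stoney */
            /* detach each MFD device, then free the domain */
            kfree(adev->acp.acp_genpd);
    }
    mfd_remove_devices(adev->acp.parent);
    kfree(adev->acp.acp_res);
    kfree(adev->acp.acp_cell);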
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index ef79551b4cb7..57afad79f55d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -30,10 +30,10 @@ | |||
30 | #include <drm/drmP.h> | 30 | #include <drm/drmP.h> |
31 | #include <drm/drm_crtc_helper.h> | 31 | #include <drm/drm_crtc_helper.h> |
32 | #include "amdgpu.h" | 32 | #include "amdgpu.h" |
33 | #include "amdgpu_pm.h" | ||
33 | #include "amd_acpi.h" | 34 | #include "amd_acpi.h" |
34 | #include "atom.h" | 35 | #include "atom.h" |
35 | 36 | ||
36 | extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); | ||
37 | /* Call the ATIF method | 37 | /* Call the ATIF method |
38 | */ | 38 | */ |
39 | /** | 39 | /** |
@@ -289,7 +289,7 @@ out: | |||
289 | * handles it. | 289 | * handles it. |
290 | * Returns NOTIFY code | 290 | * Returns NOTIFY code |
291 | */ | 291 | */ |
292 | int amdgpu_atif_handler(struct amdgpu_device *adev, | 292 | static int amdgpu_atif_handler(struct amdgpu_device *adev, |
293 | struct acpi_bus_event *event) | 293 | struct acpi_bus_event *event) |
294 | { | 294 | { |
295 | struct amdgpu_atif *atif = &adev->atif; | 295 | struct amdgpu_atif *atif = &adev->atif; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 37971d9402e3..c7bcf5207d79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -27,7 +27,6 @@ | |||
27 | #include "amdgpu_gfx.h" | 27 | #include "amdgpu_gfx.h" |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | 29 | ||
30 | const struct kfd2kgd_calls *kfd2kgd; | ||
31 | const struct kgd2kfd_calls *kgd2kfd; | 30 | const struct kgd2kfd_calls *kgd2kfd; |
32 | bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); | 31 | bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**); |
33 | 32 | ||
@@ -61,8 +60,21 @@ int amdgpu_amdkfd_init(void) | |||
61 | return ret; | 60 | return ret; |
62 | } | 61 | } |
63 | 62 | ||
64 | bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev) | 63 | void amdgpu_amdkfd_fini(void) |
64 | { | ||
65 | if (kgd2kfd) { | ||
66 | kgd2kfd->exit(); | ||
67 | symbol_put(kgd2kfd_init); | ||
68 | } | ||
69 | } | ||
70 | |||
71 | void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) | ||
65 | { | 72 | { |
73 | const struct kfd2kgd_calls *kfd2kgd; | ||
74 | |||
75 | if (!kgd2kfd) | ||
76 | return; | ||
77 | |||
66 | switch (adev->asic_type) { | 78 | switch (adev->asic_type) { |
67 | #ifdef CONFIG_DRM_AMDGPU_CIK | 79 | #ifdef CONFIG_DRM_AMDGPU_CIK |
68 | case CHIP_KAVERI: | 80 | case CHIP_KAVERI: |
@@ -73,25 +85,12 @@ bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev) | |||
73 | kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); | 85 | kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); |
74 | break; | 86 | break; |
75 | default: | 87 | default: |
76 | return false; | 88 | dev_info(adev->dev, "kfd not supported on this ASIC\n"); |
89 | return; | ||
77 | } | 90 | } |
78 | 91 | ||
79 | return true; | 92 | adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev, |
80 | } | 93 | adev->pdev, kfd2kgd); |
81 | |||
82 | void amdgpu_amdkfd_fini(void) | ||
83 | { | ||
84 | if (kgd2kfd) { | ||
85 | kgd2kfd->exit(); | ||
86 | symbol_put(kgd2kfd_init); | ||
87 | } | ||
88 | } | ||
89 | |||
90 | void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) | ||
91 | { | ||
92 | if (kgd2kfd) | ||
93 | adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev, | ||
94 | adev->pdev, kfd2kgd); | ||
95 | } | 94 | } |
96 | 95 | ||
97 | void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) | 96 | void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) |
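The separate amdgpu_amdkfd_load_interface() step is folded into amdgpu_amdkfd_device_probe(): kfd2kgd becomes a local chosen per ASIC, and unsupported chips log a note and return instead of failing. As the amdgpu_kms.c hunk further down shows, the bring-up sequence reduces to:

    /* Sketch of the simplified KFD bring-up path after this change: */
    amdgpu_amdkfd_init();              /* resolve kgd2kfd module symbols   */
    amdgpu_amdkfd_device_probe(adev);  /* pick per-ASIC kfd2kgd, ->probe() */
    amdgpu_amdkfd_device_init(adev);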
@@ -184,7 +183,8 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, | |||
184 | return -ENOMEM; | 183 | return -ENOMEM; |
185 | 184 | ||
186 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, | 185 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT, |
187 | AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo); | 186 | AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0, |
187 | &(*mem)->bo); | ||
188 | if (r) { | 188 | if (r) { |
189 | dev_err(adev->dev, | 189 | dev_err(adev->dev, |
190 | "failed to allocate BO for amdkfd (%d)\n", r); | 190 | "failed to allocate BO for amdkfd (%d)\n", r); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 73f83a10ae14..b8802a561cbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -39,8 +39,6 @@ struct kgd_mem { | |||
39 | int amdgpu_amdkfd_init(void); | 39 | int amdgpu_amdkfd_init(void); |
40 | void amdgpu_amdkfd_fini(void); | 40 | void amdgpu_amdkfd_fini(void); |
41 | 41 | ||
42 | bool amdgpu_amdkfd_load_interface(struct amdgpu_device *adev); | ||
43 | |||
44 | void amdgpu_amdkfd_suspend(struct amdgpu_device *adev); | 42 | void amdgpu_amdkfd_suspend(struct amdgpu_device *adev); |
45 | int amdgpu_amdkfd_resume(struct amdgpu_device *adev); | 43 | int amdgpu_amdkfd_resume(struct amdgpu_device *adev); |
46 | void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, | 44 | void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 2fb299afc12b..63ec1e1bb6aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -81,7 +81,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, | |||
81 | 81 | ||
82 | n = AMDGPU_BENCHMARK_ITERATIONS; | 82 | n = AMDGPU_BENCHMARK_ITERATIONS; |
83 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, | 83 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, |
84 | NULL, &sobj); | 84 | NULL, 0, &sobj); |
85 | if (r) { | 85 | if (r) { |
86 | goto out_cleanup; | 86 | goto out_cleanup; |
87 | } | 87 | } |
@@ -94,7 +94,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, | |||
94 | goto out_cleanup; | 94 | goto out_cleanup; |
95 | } | 95 | } |
96 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, | 96 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, |
97 | NULL, &dobj); | 97 | NULL, 0, &dobj); |
98 | if (r) { | 98 | if (r) { |
99 | goto out_cleanup; | 99 | goto out_cleanup; |
100 | } | 100 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index d324e1c24028..59089e027f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -136,7 +136,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, | |||
136 | } | 136 | } |
137 | 137 | ||
138 | bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); | 138 | bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); |
139 | drm_gem_object_unreference_unlocked(gobj); | 139 | drm_gem_object_put_unlocked(gobj); |
140 | 140 | ||
141 | usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); | 141 | usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); |
142 | if (usermm) { | 142 | if (usermm) { |
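This is the first of many hunks from the drm_*{get,put} helper conversion listed in the merge log; the calls are pure renames with unchanged reference-counting semantics:

    /* Same behavior, new names:
     *   drm_gem_object_reference(obj)            -> drm_gem_object_get(obj)
     *   drm_gem_object_unreference_unlocked(obj) -> drm_gem_object_put_unlocked(obj)
     */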
@@ -156,11 +156,11 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, | |||
156 | entry->tv.bo = &entry->robj->tbo; | 156 | entry->tv.bo = &entry->robj->tbo; |
157 | entry->tv.shared = !entry->robj->prime_shared_count; | 157 | entry->tv.shared = !entry->robj->prime_shared_count; |
158 | 158 | ||
159 | if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) | 159 | if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS) |
160 | gds_obj = entry->robj; | 160 | gds_obj = entry->robj; |
161 | if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) | 161 | if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS) |
162 | gws_obj = entry->robj; | 162 | gws_obj = entry->robj; |
163 | if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) | 163 | if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA) |
164 | oa_obj = entry->robj; | 164 | oa_obj = entry->robj; |
165 | 165 | ||
166 | total_size += amdgpu_bo_size(entry->robj); | 166 | total_size += amdgpu_bo_size(entry->robj); |
@@ -270,7 +270,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, | |||
270 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | 270 | struct amdgpu_fpriv *fpriv = filp->driver_priv; |
271 | union drm_amdgpu_bo_list *args = data; | 271 | union drm_amdgpu_bo_list *args = data; |
272 | uint32_t handle = args->in.list_handle; | 272 | uint32_t handle = args->in.list_handle; |
273 | const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr; | 273 | const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr); |
274 | 274 | ||
275 | struct drm_amdgpu_bo_list_entry *info; | 275 | struct drm_amdgpu_bo_list_entry *info; |
276 | struct amdgpu_bo_list *list; | 276 | struct amdgpu_bo_list *list; |
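u64_to_user_ptr() replaces the open-coded double cast for pointers that userspace passes in as u64 values; the same conversion recurs in the amdgpu_cs.c hunks below. Paraphrasing its definition in linux/kernel.h (the real macro also typechecks that the argument is a u64):

    #define u64_to_user_ptr(x) ((void __user *)(uintptr_t)(x))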
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a99e0bca6812..fd435a96481c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -124,7 +124,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device, | |||
124 | ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, | 124 | ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, |
125 | true, domain, flags, | 125 | true, domain, flags, |
126 | NULL, &placement, NULL, | 126 | NULL, &placement, NULL, |
127 | &obj); | 127 | 0, &obj); |
128 | if (ret) { | 128 | if (ret) { |
129 | DRM_ERROR("(%d) bo create failed\n", ret); | 129 | DRM_ERROR("(%d) bo create failed\n", ret); |
130 | return ret; | 130 | return ret; |
@@ -166,7 +166,7 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h | |||
166 | r = amdgpu_bo_reserve(obj, true); | 166 | r = amdgpu_bo_reserve(obj, true); |
167 | if (unlikely(r != 0)) | 167 | if (unlikely(r != 0)) |
168 | return r; | 168 | return r; |
169 | r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains, | 169 | r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains, |
170 | min_offset, max_offset, mcaddr); | 170 | min_offset, max_offset, mcaddr); |
171 | amdgpu_bo_unreserve(obj); | 171 | amdgpu_bo_unreserve(obj); |
172 | return r; | 172 | return r; |
@@ -659,7 +659,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | |||
659 | info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); | 659 | info->version = (uint16_t)le32_to_cpu(header->header.ucode_version); |
660 | 660 | ||
661 | if (CGS_UCODE_ID_CP_MEC == type) | 661 | if (CGS_UCODE_ID_CP_MEC == type) |
662 | info->image_size = (header->jt_offset) << 2; | 662 | info->image_size = le32_to_cpu(header->jt_offset) << 2; |
663 | 663 | ||
664 | info->fw_version = amdgpu_get_firmware_version(cgs_device, type); | 664 | info->fw_version = amdgpu_get_firmware_version(cgs_device, type); |
665 | info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); | 665 | info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version); |
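Firmware image headers are stored little-endian, so reading header->jt_offset raw only worked on little-endian hosts; every multi-byte field must pass through le32_to_cpu() before use. The general pattern:

    __le32 raw = header->jt_offset;     /* byte order of the firmware blob */
    u32 jt_offset = le32_to_cpu(raw);   /* CPU byte order */
    info->image_size = jt_offset << 2;  /* dword count to bytes */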
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 33789510e663..c05479ec825a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -54,7 +54,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, | |||
54 | 54 | ||
55 | *offset = data->offset; | 55 | *offset = data->offset; |
56 | 56 | ||
57 | drm_gem_object_unreference_unlocked(gobj); | 57 | drm_gem_object_put_unlocked(gobj); |
58 | 58 | ||
59 | if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { | 59 | if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) { |
60 | amdgpu_bo_unref(&p->uf_entry.robj); | 60 | amdgpu_bo_unref(&p->uf_entry.robj); |
@@ -90,7 +90,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
90 | } | 90 | } |
91 | 91 | ||
92 | /* get chunks */ | 92 | /* get chunks */ |
93 | chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks); | 93 | chunk_array_user = u64_to_user_ptr(cs->in.chunks); |
94 | if (copy_from_user(chunk_array, chunk_array_user, | 94 | if (copy_from_user(chunk_array, chunk_array_user, |
95 | sizeof(uint64_t)*cs->in.num_chunks)) { | 95 | sizeof(uint64_t)*cs->in.num_chunks)) { |
96 | ret = -EFAULT; | 96 | ret = -EFAULT; |
@@ -110,7 +110,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
110 | struct drm_amdgpu_cs_chunk user_chunk; | 110 | struct drm_amdgpu_cs_chunk user_chunk; |
111 | uint32_t __user *cdata; | 111 | uint32_t __user *cdata; |
112 | 112 | ||
113 | chunk_ptr = (void __user *)(uintptr_t)chunk_array[i]; | 113 | chunk_ptr = u64_to_user_ptr(chunk_array[i]); |
114 | if (copy_from_user(&user_chunk, chunk_ptr, | 114 | if (copy_from_user(&user_chunk, chunk_ptr, |
115 | sizeof(struct drm_amdgpu_cs_chunk))) { | 115 | sizeof(struct drm_amdgpu_cs_chunk))) { |
116 | ret = -EFAULT; | 116 | ret = -EFAULT; |
@@ -121,7 +121,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) | |||
121 | p->chunks[i].length_dw = user_chunk.length_dw; | 121 | p->chunks[i].length_dw = user_chunk.length_dw; |
122 | 122 | ||
123 | size = p->chunks[i].length_dw; | 123 | size = p->chunks[i].length_dw; |
124 | cdata = (void __user *)(uintptr_t)user_chunk.chunk_data; | 124 | cdata = u64_to_user_ptr(user_chunk.chunk_data); |
125 | 125 | ||
126 | p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); | 126 | p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); |
127 | if (p->chunks[i].kdata == NULL) { | 127 | if (p->chunks[i].kdata == NULL) { |
@@ -348,11 +348,11 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p, | |||
348 | * that. | 348 | * that. |
349 | */ | 349 | */ |
350 | if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) | 350 | if (p->bytes_moved_vis < p->bytes_moved_vis_threshold) |
351 | domain = bo->prefered_domains; | 351 | domain = bo->preferred_domains; |
352 | else | 352 | else |
353 | domain = bo->allowed_domains; | 353 | domain = bo->allowed_domains; |
354 | } else { | 354 | } else { |
355 | domain = bo->prefered_domains; | 355 | domain = bo->preferred_domains; |
356 | } | 356 | } |
357 | } else { | 357 | } else { |
358 | domain = bo->allowed_domains; | 358 | domain = bo->allowed_domains; |
@@ -1437,7 +1437,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, | |||
1437 | if (fences == NULL) | 1437 | if (fences == NULL) |
1438 | return -ENOMEM; | 1438 | return -ENOMEM; |
1439 | 1439 | ||
1440 | fences_user = (void __user *)(uintptr_t)(wait->in.fences); | 1440 | fences_user = u64_to_user_ptr(wait->in.fences); |
1441 | if (copy_from_user(fences, fences_user, | 1441 | if (copy_from_user(fences, fences_user, |
1442 | sizeof(struct drm_amdgpu_fence) * fence_count)) { | 1442 | sizeof(struct drm_amdgpu_fence) * fence_count)) { |
1443 | r = -EFAULT; | 1443 | r = -EFAULT; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6279956e92a4..a6f6cb0f2e02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -336,51 +336,16 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev, | |||
336 | 336 | ||
337 | static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) | 337 | static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) |
338 | { | 338 | { |
339 | int r; | 339 | return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, |
340 | 340 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | |
341 | if (adev->vram_scratch.robj == NULL) { | 341 | &adev->vram_scratch.robj, |
342 | r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, | 342 | &adev->vram_scratch.gpu_addr, |
343 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, | 343 | (void **)&adev->vram_scratch.ptr); |
344 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | ||
345 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
346 | NULL, NULL, &adev->vram_scratch.robj); | ||
347 | if (r) { | ||
348 | return r; | ||
349 | } | ||
350 | } | ||
351 | |||
352 | r = amdgpu_bo_reserve(adev->vram_scratch.robj, false); | ||
353 | if (unlikely(r != 0)) | ||
354 | return r; | ||
355 | r = amdgpu_bo_pin(adev->vram_scratch.robj, | ||
356 | AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr); | ||
357 | if (r) { | ||
358 | amdgpu_bo_unreserve(adev->vram_scratch.robj); | ||
359 | return r; | ||
360 | } | ||
361 | r = amdgpu_bo_kmap(adev->vram_scratch.robj, | ||
362 | (void **)&adev->vram_scratch.ptr); | ||
363 | if (r) | ||
364 | amdgpu_bo_unpin(adev->vram_scratch.robj); | ||
365 | amdgpu_bo_unreserve(adev->vram_scratch.robj); | ||
366 | |||
367 | return r; | ||
368 | } | 344 | } |
369 | 345 | ||
370 | static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) | 346 | static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev) |
371 | { | 347 | { |
372 | int r; | 348 | amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL); |
373 | |||
374 | if (adev->vram_scratch.robj == NULL) { | ||
375 | return; | ||
376 | } | ||
377 | r = amdgpu_bo_reserve(adev->vram_scratch.robj, true); | ||
378 | if (likely(r == 0)) { | ||
379 | amdgpu_bo_kunmap(adev->vram_scratch.robj); | ||
380 | amdgpu_bo_unpin(adev->vram_scratch.robj); | ||
381 | amdgpu_bo_unreserve(adev->vram_scratch.robj); | ||
382 | } | ||
383 | amdgpu_bo_unref(&adev->vram_scratch.robj); | ||
384 | } | 349 | } |
385 | 350 | ||
386 | /** | 351 | /** |
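The open-coded create/reserve/pin/kmap/unreserve dance collapses into the amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() pair. The same pattern works for any pinned kernel BO; a usage sketch:

    struct amdgpu_bo *bo = NULL;
    u64 gpu_addr;
    void *cpu_ptr;
    int r;

    /* create + pin + kmap in one call */
    r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
                                AMDGPU_GEM_DOMAIN_VRAM,
                                &bo, &gpu_addr, &cpu_ptr);
    if (r)
            return r;
    /* ... use cpu_ptr / gpu_addr ... */
    amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);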
@@ -539,7 +504,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) | |||
539 | int r; | 504 | int r; |
540 | 505 | ||
541 | if (adev->wb.wb_obj == NULL) { | 506 | if (adev->wb.wb_obj == NULL) { |
542 | r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t), | 507 | /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */ |
508 | r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8, | ||
543 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, | 509 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, |
544 | &adev->wb.wb_obj, &adev->wb.gpu_addr, | 510 | &adev->wb.wb_obj, &adev->wb.gpu_addr, |
545 | (void **)&adev->wb.wb); | 511 | (void **)&adev->wb.wb); |
@@ -570,47 +536,10 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) | |||
570 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) | 536 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb) |
571 | { | 537 | { |
572 | unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); | 538 | unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb); |
573 | if (offset < adev->wb.num_wb) { | ||
574 | __set_bit(offset, adev->wb.used); | ||
575 | *wb = offset; | ||
576 | return 0; | ||
577 | } else { | ||
578 | return -EINVAL; | ||
579 | } | ||
580 | } | ||
581 | 539 | ||
582 | /** | 540 | if (offset < adev->wb.num_wb) { |
583 | * amdgpu_wb_get_64bit - Allocate a wb entry | ||
584 | * | ||
585 | * @adev: amdgpu_device pointer | ||
586 | * @wb: wb index | ||
587 | * | ||
588 | * Allocate a wb slot for use by the driver (all asics). | ||
589 | * Returns 0 on success or -EINVAL on failure. | ||
590 | */ | ||
591 | int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb) | ||
592 | { | ||
593 | unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used, | ||
594 | adev->wb.num_wb, 0, 2, 7, 0); | ||
595 | if ((offset + 1) < adev->wb.num_wb) { | ||
596 | __set_bit(offset, adev->wb.used); | 541 | __set_bit(offset, adev->wb.used); |
597 | __set_bit(offset + 1, adev->wb.used); | 542 | *wb = offset * 8; /* convert to dw offset */ |
598 | *wb = offset; | ||
599 | return 0; | ||
600 | } else { | ||
601 | return -EINVAL; | ||
602 | } | ||
603 | } | ||
604 | |||
605 | int amdgpu_wb_get_256Bit(struct amdgpu_device *adev, u32 *wb) | ||
606 | { | ||
607 | int i = 0; | ||
608 | unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used, | ||
609 | adev->wb.num_wb, 0, 8, 63, 0); | ||
610 | if ((offset + 7) < adev->wb.num_wb) { | ||
611 | for (i = 0; i < 8; i++) | ||
612 | __set_bit(offset + i, adev->wb.used); | ||
613 | *wb = offset; | ||
614 | return 0; | 543 | return 0; |
615 | } else { | 544 | } else { |
616 | return -EINVAL; | 545 | return -EINVAL; |
@@ -632,39 +561,6 @@ void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb) | |||
632 | } | 561 | } |
633 | 562 | ||
634 | /** | 563 | /** |
635 | * amdgpu_wb_free_64bit - Free a wb entry | ||
636 | * | ||
637 | * @adev: amdgpu_device pointer | ||
638 | * @wb: wb index | ||
639 | * | ||
640 | * Free a wb slot allocated for use by the driver (all asics) | ||
641 | */ | ||
642 | void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb) | ||
643 | { | ||
644 | if ((wb + 1) < adev->wb.num_wb) { | ||
645 | __clear_bit(wb, adev->wb.used); | ||
646 | __clear_bit(wb + 1, adev->wb.used); | ||
647 | } | ||
648 | } | ||
649 | |||
650 | /** | ||
651 | * amdgpu_wb_free_256bit - Free a wb entry | ||
652 | * | ||
653 | * @adev: amdgpu_device pointer | ||
654 | * @wb: wb index | ||
655 | * | ||
656 | * Free a wb slot allocated for use by the driver (all asics) | ||
657 | */ | ||
658 | void amdgpu_wb_free_256bit(struct amdgpu_device *adev, u32 wb) | ||
659 | { | ||
660 | int i = 0; | ||
661 | |||
662 | if ((wb + 7) < adev->wb.num_wb) | ||
663 | for (i = 0; i < 8; i++) | ||
664 | __clear_bit(wb + i, adev->wb.used); | ||
665 | } | ||
666 | |||
667 | /** | ||
668 | * amdgpu_vram_location - try to find VRAM location | 564 | * amdgpu_vram_location - try to find VRAM location |
669 | * @adev: amdgpu device structure holding all necessary informations | 565 | * @adev: amdgpu device structure holding all necessary informations |
670 | * @mc: memory controller structure holding memory informations | 566 | * @mc: memory controller structure holding memory informations |
@@ -1948,7 +1844,8 @@ static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev) | |||
1948 | AMD_IP_BLOCK_TYPE_DCE, | 1844 | AMD_IP_BLOCK_TYPE_DCE, |
1949 | AMD_IP_BLOCK_TYPE_GFX, | 1845 | AMD_IP_BLOCK_TYPE_GFX, |
1950 | AMD_IP_BLOCK_TYPE_SDMA, | 1846 | AMD_IP_BLOCK_TYPE_SDMA, |
1951 | AMD_IP_BLOCK_TYPE_VCE, | 1847 | AMD_IP_BLOCK_TYPE_UVD, |
1848 | AMD_IP_BLOCK_TYPE_VCE | ||
1952 | }; | 1849 | }; |
1953 | 1850 | ||
1954 | for (i = 0; i < ARRAY_SIZE(ip_order); i++) { | 1851 | for (i = 0; i < ARRAY_SIZE(ip_order); i++) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index cdf2ab20166a..6ad243293a78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -482,7 +482,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
482 | { | 482 | { |
483 | struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); | 483 | struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); |
484 | 484 | ||
485 | drm_gem_object_unreference_unlocked(amdgpu_fb->obj); | 485 | drm_gem_object_put_unlocked(amdgpu_fb->obj); |
486 | drm_framebuffer_cleanup(fb); | 486 | drm_framebuffer_cleanup(fb); |
487 | kfree(amdgpu_fb); | 487 | kfree(amdgpu_fb); |
488 | } | 488 | } |
@@ -542,14 +542,14 @@ amdgpu_user_framebuffer_create(struct drm_device *dev, | |||
542 | 542 | ||
543 | amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); | 543 | amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); |
544 | if (amdgpu_fb == NULL) { | 544 | if (amdgpu_fb == NULL) { |
545 | drm_gem_object_unreference_unlocked(obj); | 545 | drm_gem_object_put_unlocked(obj); |
546 | return ERR_PTR(-ENOMEM); | 546 | return ERR_PTR(-ENOMEM); |
547 | } | 547 | } |
548 | 548 | ||
549 | ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj); | 549 | ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj); |
550 | if (ret) { | 550 | if (ret) { |
551 | kfree(amdgpu_fb); | 551 | kfree(amdgpu_fb); |
552 | drm_gem_object_unreference_unlocked(obj); | 552 | drm_gem_object_put_unlocked(obj); |
553 | return ERR_PTR(ret); | 553 | return ERR_PTR(ret); |
554 | } | 554 | } |
555 | 555 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 0a8ee2411180..9afa9c097e1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -118,7 +118,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) | |||
118 | amdgpu_bo_unpin(abo); | 118 | amdgpu_bo_unpin(abo); |
119 | amdgpu_bo_unreserve(abo); | 119 | amdgpu_bo_unreserve(abo); |
120 | } | 120 | } |
121 | drm_gem_object_unreference_unlocked(gobj); | 121 | drm_gem_object_put_unlocked(gobj); |
122 | } | 122 | } |
123 | 123 | ||
124 | static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, | 124 | static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, |
@@ -250,7 +250,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, | |||
250 | tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start; | 250 | tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start; |
251 | info->fix.smem_start = adev->mc.aper_base + tmp; | 251 | info->fix.smem_start = adev->mc.aper_base + tmp; |
252 | info->fix.smem_len = amdgpu_bo_size(abo); | 252 | info->fix.smem_len = amdgpu_bo_size(abo); |
253 | info->screen_base = abo->kptr; | 253 | info->screen_base = amdgpu_bo_kptr(abo); |
254 | info->screen_size = amdgpu_bo_size(abo); | 254 | info->screen_size = amdgpu_bo_size(abo); |
255 | 255 | ||
256 | drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); | 256 | drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); |
@@ -280,7 +280,7 @@ out: | |||
280 | 280 | ||
281 | } | 281 | } |
282 | if (fb && ret) { | 282 | if (fb && ret) { |
283 | drm_gem_object_unreference_unlocked(gobj); | 283 | drm_gem_object_put_unlocked(gobj); |
284 | drm_framebuffer_unregister_private(fb); | 284 | drm_framebuffer_unregister_private(fb); |
285 | drm_framebuffer_cleanup(fb); | 285 | drm_framebuffer_cleanup(fb); |
286 | kfree(fb); | 286 | kfree(fb); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 5cc4987cd887..94c1e2e8e34c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -144,7 +144,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) | |||
144 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, | 144 | PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, |
145 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 145 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
146 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 146 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, |
147 | NULL, NULL, &adev->gart.robj); | 147 | NULL, NULL, 0, &adev->gart.robj); |
148 | if (r) { | 148 | if (r) { |
149 | return r; | 149 | return r; |
150 | } | 150 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 917ac5e074a0..81127ffcefb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -59,7 +59,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, | |||
59 | 59 | ||
60 | retry: | 60 | retry: |
61 | r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, | 61 | r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, |
62 | flags, NULL, NULL, &robj); | 62 | flags, NULL, NULL, 0, &robj); |
63 | if (r) { | 63 | if (r) { |
64 | if (r != -ERESTARTSYS) { | 64 | if (r != -ERESTARTSYS) { |
65 | if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { | 65 | if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { |
@@ -91,7 +91,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev) | |||
91 | spin_lock(&file->table_lock); | 91 | spin_lock(&file->table_lock); |
92 | idr_for_each_entry(&file->object_idr, gobj, handle) { | 92 | idr_for_each_entry(&file->object_idr, gobj, handle) { |
93 | WARN_ONCE(1, "And also active allocations!\n"); | 93 | WARN_ONCE(1, "And also active allocations!\n"); |
94 | drm_gem_object_unreference_unlocked(gobj); | 94 | drm_gem_object_put_unlocked(gobj); |
95 | } | 95 | } |
96 | idr_destroy(&file->object_idr); | 96 | idr_destroy(&file->object_idr); |
97 | spin_unlock(&file->table_lock); | 97 | spin_unlock(&file->table_lock); |
@@ -263,7 +263,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, | |||
263 | 263 | ||
264 | r = drm_gem_handle_create(filp, gobj, &handle); | 264 | r = drm_gem_handle_create(filp, gobj, &handle); |
265 | /* drop reference from allocate - handle holds it now */ | 265 | /* drop reference from allocate - handle holds it now */ |
266 | drm_gem_object_unreference_unlocked(gobj); | 266 | drm_gem_object_put_unlocked(gobj); |
267 | if (r) | 267 | if (r) |
268 | return r; | 268 | return r; |
269 | 269 | ||
@@ -306,7 +306,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, | |||
306 | return r; | 306 | return r; |
307 | 307 | ||
308 | bo = gem_to_amdgpu_bo(gobj); | 308 | bo = gem_to_amdgpu_bo(gobj); |
309 | bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT; | 309 | bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; |
310 | bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; | 310 | bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; |
311 | r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); | 311 | r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags); |
312 | if (r) | 312 | if (r) |
@@ -341,7 +341,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, | |||
341 | 341 | ||
342 | r = drm_gem_handle_create(filp, gobj, &handle); | 342 | r = drm_gem_handle_create(filp, gobj, &handle); |
343 | /* drop reference from allocate - handle holds it now */ | 343 | /* drop reference from allocate - handle holds it now */ |
344 | drm_gem_object_unreference_unlocked(gobj); | 344 | drm_gem_object_put_unlocked(gobj); |
345 | if (r) | 345 | if (r) |
346 | return r; | 346 | return r; |
347 | 347 | ||
@@ -355,7 +355,7 @@ unlock_mmap_sem: | |||
355 | up_read(¤t->mm->mmap_sem); | 355 | up_read(¤t->mm->mmap_sem); |
356 | 356 | ||
357 | release_object: | 357 | release_object: |
358 | drm_gem_object_unreference_unlocked(gobj); | 358 | drm_gem_object_put_unlocked(gobj); |
359 | 359 | ||
360 | return r; | 360 | return r; |
361 | } | 361 | } |
@@ -374,11 +374,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp, | |||
374 | robj = gem_to_amdgpu_bo(gobj); | 374 | robj = gem_to_amdgpu_bo(gobj); |
375 | if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || | 375 | if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) || |
376 | (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { | 376 | (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) { |
377 | drm_gem_object_unreference_unlocked(gobj); | 377 | drm_gem_object_put_unlocked(gobj); |
378 | return -EPERM; | 378 | return -EPERM; |
379 | } | 379 | } |
380 | *offset_p = amdgpu_bo_mmap_offset(robj); | 380 | *offset_p = amdgpu_bo_mmap_offset(robj); |
381 | drm_gem_object_unreference_unlocked(gobj); | 381 | drm_gem_object_put_unlocked(gobj); |
382 | return 0; | 382 | return 0; |
383 | } | 383 | } |
384 | 384 | ||
@@ -448,7 +448,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
448 | } else | 448 | } else |
449 | r = ret; | 449 | r = ret; |
450 | 450 | ||
451 | drm_gem_object_unreference_unlocked(gobj); | 451 | drm_gem_object_put_unlocked(gobj); |
452 | return r; | 452 | return r; |
453 | } | 453 | } |
454 | 454 | ||
@@ -491,7 +491,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, | |||
491 | unreserve: | 491 | unreserve: |
492 | amdgpu_bo_unreserve(robj); | 492 | amdgpu_bo_unreserve(robj); |
493 | out: | 493 | out: |
494 | drm_gem_object_unreference_unlocked(gobj); | 494 | drm_gem_object_put_unlocked(gobj); |
495 | return r; | 495 | return r; |
496 | } | 496 | } |
497 | 497 | ||
@@ -664,7 +664,7 @@ error_backoff: | |||
664 | ttm_eu_backoff_reservation(&ticket, &list); | 664 | ttm_eu_backoff_reservation(&ticket, &list); |
665 | 665 | ||
666 | error_unref: | 666 | error_unref: |
667 | drm_gem_object_unreference_unlocked(gobj); | 667 | drm_gem_object_put_unlocked(gobj); |
668 | return r; | 668 | return r; |
669 | } | 669 | } |
670 | 670 | ||
@@ -689,11 +689,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, | |||
689 | switch (args->op) { | 689 | switch (args->op) { |
690 | case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: { | 690 | case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: { |
691 | struct drm_amdgpu_gem_create_in info; | 691 | struct drm_amdgpu_gem_create_in info; |
692 | void __user *out = (void __user *)(uintptr_t)args->value; | 692 | void __user *out = u64_to_user_ptr(args->value); |
693 | 693 | ||
694 | info.bo_size = robj->gem_base.size; | 694 | info.bo_size = robj->gem_base.size; |
695 | info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; | 695 | info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT; |
696 | info.domains = robj->prefered_domains; | 696 | info.domains = robj->preferred_domains; |
697 | info.domain_flags = robj->flags; | 697 | info.domain_flags = robj->flags; |
698 | amdgpu_bo_unreserve(robj); | 698 | amdgpu_bo_unreserve(robj); |
699 | if (copy_to_user(out, &info, sizeof(info))) | 699 | if (copy_to_user(out, &info, sizeof(info))) |
@@ -711,10 +711,10 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, | |||
711 | amdgpu_bo_unreserve(robj); | 711 | amdgpu_bo_unreserve(robj); |
712 | break; | 712 | break; |
713 | } | 713 | } |
714 | robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | | 714 | robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM | |
715 | AMDGPU_GEM_DOMAIN_GTT | | 715 | AMDGPU_GEM_DOMAIN_GTT | |
716 | AMDGPU_GEM_DOMAIN_CPU); | 716 | AMDGPU_GEM_DOMAIN_CPU); |
717 | robj->allowed_domains = robj->prefered_domains; | 717 | robj->allowed_domains = robj->preferred_domains; |
718 | if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) | 718 | if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) |
719 | robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; | 719 | robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; |
720 | 720 | ||
@@ -726,7 +726,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, | |||
726 | } | 726 | } |
727 | 727 | ||
728 | out: | 728 | out: |
729 | drm_gem_object_unreference_unlocked(gobj); | 729 | drm_gem_object_put_unlocked(gobj); |
730 | return r; | 730 | return r; |
731 | } | 731 | } |
732 | 732 | ||
@@ -754,7 +754,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, | |||
754 | 754 | ||
755 | r = drm_gem_handle_create(file_priv, gobj, &handle); | 755 | r = drm_gem_handle_create(file_priv, gobj, &handle); |
756 | /* drop reference from allocate - handle holds it now */ | 756 | /* drop reference from allocate - handle holds it now */ |
757 | drm_gem_object_unreference_unlocked(gobj); | 757 | drm_gem_object_put_unlocked(gobj); |
758 | if (r) { | 758 | if (r) { |
759 | return r; | 759 | return r; |
760 | } | 760 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 09f833255ba1..c908f972283c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -158,7 +158,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
158 | "Error during ACPI methods call\n"); | 158 | "Error during ACPI methods call\n"); |
159 | } | 159 | } |
160 | 160 | ||
161 | amdgpu_amdkfd_load_interface(adev); | ||
162 | amdgpu_amdkfd_device_probe(adev); | 161 | amdgpu_amdkfd_device_probe(adev); |
163 | amdgpu_amdkfd_device_init(adev); | 162 | amdgpu_amdkfd_device_init(adev); |
164 | 163 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3ec43cf9ad78..6e72fe7901ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -220,7 +220,7 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, | |||
220 | } | 220 | } |
221 | 221 | ||
222 | /** | 222 | /** |
223 | * amdgpu_bo_create_kernel - create BO for kernel use | 223 | * amdgpu_bo_create_reserved - create reserved BO for kernel use |
224 | * | 224 | * |
225 | * @adev: amdgpu device object | 225 | * @adev: amdgpu device object |
226 | * @size: size for the new BO | 226 | * @size: size for the new BO |
@@ -230,24 +230,30 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo, | |||
230 | * @gpu_addr: GPU addr of the pinned BO | 230 | * @gpu_addr: GPU addr of the pinned BO |
231 | * @cpu_addr: optional CPU address mapping | 231 | * @cpu_addr: optional CPU address mapping |
232 | * | 232 | * |
233 | * Allocates and pins a BO for kernel internal use. | 233 | * Allocates and pins a BO for kernel internal use, and returns it still |
234 | * reserved. | ||
234 | * | 235 | * |
235 | * Returns 0 on success, negative error code otherwise. | 236 | * Returns 0 on success, negative error code otherwise. |
236 | */ | 237 | */ |
237 | int amdgpu_bo_create_kernel(struct amdgpu_device *adev, | 238 | int amdgpu_bo_create_reserved(struct amdgpu_device *adev, |
238 | unsigned long size, int align, | 239 | unsigned long size, int align, |
239 | u32 domain, struct amdgpu_bo **bo_ptr, | 240 | u32 domain, struct amdgpu_bo **bo_ptr, |
240 | u64 *gpu_addr, void **cpu_addr) | 241 | u64 *gpu_addr, void **cpu_addr) |
241 | { | 242 | { |
243 | bool free = false; | ||
242 | int r; | 244 | int r; |
243 | 245 | ||
244 | r = amdgpu_bo_create(adev, size, align, true, domain, | 246 | if (!*bo_ptr) { |
245 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 247 | r = amdgpu_bo_create(adev, size, align, true, domain, |
246 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 248 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
247 | NULL, NULL, bo_ptr); | 249 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, |
248 | if (r) { | 250 | NULL, NULL, 0, bo_ptr); |
249 | dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); | 251 | if (r) { |
250 | return r; | 252 | dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", |
253 | r); | ||
254 | return r; | ||
255 | } | ||
256 | free = true; | ||
251 | } | 257 | } |
252 | 258 | ||
253 | r = amdgpu_bo_reserve(*bo_ptr, false); | 259 | r = amdgpu_bo_reserve(*bo_ptr, false); |
@@ -270,20 +276,52 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, | |||
270 | } | 276 | } |
271 | } | 277 | } |
272 | 278 | ||
273 | amdgpu_bo_unreserve(*bo_ptr); | ||
274 | |||
275 | return 0; | 279 | return 0; |
276 | 280 | ||
277 | error_unreserve: | 281 | error_unreserve: |
278 | amdgpu_bo_unreserve(*bo_ptr); | 282 | amdgpu_bo_unreserve(*bo_ptr); |
279 | 283 | ||
280 | error_free: | 284 | error_free: |
281 | amdgpu_bo_unref(bo_ptr); | 285 | if (free) |
286 | amdgpu_bo_unref(bo_ptr); | ||
282 | 287 | ||
283 | return r; | 288 | return r; |
284 | } | 289 | } |
285 | 290 | ||
286 | /** | 291 | /** |
292 | * amdgpu_bo_create_kernel - create BO for kernel use | ||
293 | * | ||
294 | * @adev: amdgpu device object | ||
295 | * @size: size for the new BO | ||
296 | * @align: alignment for the new BO | ||
297 | * @domain: where to place it | ||
298 | * @bo_ptr: resulting BO | ||
299 | * @gpu_addr: GPU addr of the pinned BO | ||
300 | * @cpu_addr: optional CPU address mapping | ||
301 | * | ||
302 | * Allocates and pins a BO for kernel internal use. | ||
303 | * | ||
304 | * Returns 0 on success, negative error code otherwise. | ||
305 | */ | ||
306 | int amdgpu_bo_create_kernel(struct amdgpu_device *adev, | ||
307 | unsigned long size, int align, | ||
308 | u32 domain, struct amdgpu_bo **bo_ptr, | ||
309 | u64 *gpu_addr, void **cpu_addr) | ||
310 | { | ||
311 | int r; | ||
312 | |||
313 | r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr, | ||
314 | gpu_addr, cpu_addr); | ||
315 | |||
316 | if (r) | ||
317 | return r; | ||
318 | |||
319 | amdgpu_bo_unreserve(*bo_ptr); | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | /** | ||
287 | * amdgpu_bo_free_kernel - free BO for kernel use | 325 | * amdgpu_bo_free_kernel - free BO for kernel use |
288 | * | 326 | * |
289 | * @bo: amdgpu BO to free | 327 | * @bo: amdgpu BO to free |
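amdgpu_bo_create_kernel() keeps its old contract (BO returned unreserved), while the new amdgpu_bo_create_reserved() hands the BO back still reserved and can reuse an already-allocated *bo_ptr, so callers can finish setup under the reservation. A usage sketch:

    struct amdgpu_bo *bo = NULL;  /* NULL: let the helper allocate */
    u64 gpu_addr;
    void *cpu_addr;
    int r;

    r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_VRAM,
                                  &bo, &gpu_addr, &cpu_addr);
    if (r)
            return r;
    /* ... extra initialization while the BO is still reserved ... */
    amdgpu_bo_unreserve(bo);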
@@ -318,6 +356,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
318 | struct sg_table *sg, | 356 | struct sg_table *sg, |
319 | struct ttm_placement *placement, | 357 | struct ttm_placement *placement, |
320 | struct reservation_object *resv, | 358 | struct reservation_object *resv, |
359 | uint64_t init_value, | ||
321 | struct amdgpu_bo **bo_ptr) | 360 | struct amdgpu_bo **bo_ptr) |
322 | { | 361 | { |
323 | struct amdgpu_bo *bo; | 362 | struct amdgpu_bo *bo; |
@@ -352,13 +391,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
352 | } | 391 | } |
353 | INIT_LIST_HEAD(&bo->shadow_list); | 392 | INIT_LIST_HEAD(&bo->shadow_list); |
354 | INIT_LIST_HEAD(&bo->va); | 393 | INIT_LIST_HEAD(&bo->va); |
355 | bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | | 394 | bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | |
356 | AMDGPU_GEM_DOMAIN_GTT | | 395 | AMDGPU_GEM_DOMAIN_GTT | |
357 | AMDGPU_GEM_DOMAIN_CPU | | 396 | AMDGPU_GEM_DOMAIN_CPU | |
358 | AMDGPU_GEM_DOMAIN_GDS | | 397 | AMDGPU_GEM_DOMAIN_GDS | |
359 | AMDGPU_GEM_DOMAIN_GWS | | 398 | AMDGPU_GEM_DOMAIN_GWS | |
360 | AMDGPU_GEM_DOMAIN_OA); | 399 | AMDGPU_GEM_DOMAIN_OA); |
361 | bo->allowed_domains = bo->prefered_domains; | 400 | bo->allowed_domains = bo->preferred_domains; |
362 | if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) | 401 | if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) |
363 | bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; | 402 | bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; |
364 | 403 | ||
@@ -418,7 +457,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
418 | bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { | 457 | bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { |
419 | struct dma_fence *fence; | 458 | struct dma_fence *fence; |
420 | 459 | ||
421 | r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence); | 460 | r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence); |
422 | if (unlikely(r)) | 461 | if (unlikely(r)) |
423 | goto fail_unreserve; | 462 | goto fail_unreserve; |
424 | 463 | ||
@@ -470,6 +509,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, | |||
470 | AMDGPU_GEM_CREATE_CPU_GTT_USWC, | 509 | AMDGPU_GEM_CREATE_CPU_GTT_USWC, |
471 | NULL, &placement, | 510 | NULL, &placement, |
472 | bo->tbo.resv, | 511 | bo->tbo.resv, |
512 | 0, | ||
473 | &bo->shadow); | 513 | &bo->shadow); |
474 | if (!r) { | 514 | if (!r) { |
475 | bo->shadow->parent = amdgpu_bo_ref(bo); | 515 | bo->shadow->parent = amdgpu_bo_ref(bo); |
@@ -481,11 +521,15 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, | |||
481 | return r; | 521 | return r; |
482 | } | 522 | } |
483 | 523 | ||
524 | /* init_value will only take effect when flags contains | ||
525 | * AMDGPU_GEM_CREATE_VRAM_CLEARED. | ||
526 | */ | ||
484 | int amdgpu_bo_create(struct amdgpu_device *adev, | 527 | int amdgpu_bo_create(struct amdgpu_device *adev, |
485 | unsigned long size, int byte_align, | 528 | unsigned long size, int byte_align, |
486 | bool kernel, u32 domain, u64 flags, | 529 | bool kernel, u32 domain, u64 flags, |
487 | struct sg_table *sg, | 530 | struct sg_table *sg, |
488 | struct reservation_object *resv, | 531 | struct reservation_object *resv, |
532 | uint64_t init_value, | ||
489 | struct amdgpu_bo **bo_ptr) | 533 | struct amdgpu_bo **bo_ptr) |
490 | { | 534 | { |
491 | struct ttm_placement placement = {0}; | 535 | struct ttm_placement placement = {0}; |
@@ -500,7 +544,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, | |||
500 | 544 | ||
501 | r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, | 545 | r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, |
502 | domain, flags, sg, &placement, | 546 | domain, flags, sg, &placement, |
503 | resv, bo_ptr); | 547 | resv, init_value, bo_ptr); |
504 | if (r) | 548 | if (r) |
505 | return r; | 549 | return r; |
506 | 550 | ||
@@ -562,7 +606,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo) | |||
562 | if (bo->pin_count) | 606 | if (bo->pin_count) |
563 | return 0; | 607 | return 0; |
564 | 608 | ||
565 | domain = bo->prefered_domains; | 609 | domain = bo->preferred_domains; |
566 | 610 | ||
567 | retry: | 611 | retry: |
568 | amdgpu_ttm_placement_from_domain(bo, domain); | 612 | amdgpu_ttm_placement_from_domain(bo, domain); |
@@ -609,16 +653,16 @@ err: | |||
609 | 653 | ||
610 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) | 654 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) |
611 | { | 655 | { |
612 | bool is_iomem; | 656 | void *kptr; |
613 | long r; | 657 | long r; |
614 | 658 | ||
615 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) | 659 | if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) |
616 | return -EPERM; | 660 | return -EPERM; |
617 | 661 | ||
618 | if (bo->kptr) { | 662 | kptr = amdgpu_bo_kptr(bo); |
619 | if (ptr) { | 663 | if (kptr) { |
620 | *ptr = bo->kptr; | 664 | if (ptr) |
621 | } | 665 | *ptr = kptr; |
622 | return 0; | 666 | return 0; |
623 | } | 667 | } |
624 | 668 | ||
@@ -631,19 +675,23 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) | |||
631 | if (r) | 675 | if (r) |
632 | return r; | 676 | return r; |
633 | 677 | ||
634 | bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); | ||
635 | if (ptr) | 678 | if (ptr) |
636 | *ptr = bo->kptr; | 679 | *ptr = amdgpu_bo_kptr(bo); |
637 | 680 | ||
638 | return 0; | 681 | return 0; |
639 | } | 682 | } |
640 | 683 | ||
684 | void *amdgpu_bo_kptr(struct amdgpu_bo *bo) | ||
685 | { | ||
686 | bool is_iomem; | ||
687 | |||
688 | return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); | ||
689 | } | ||
690 | |||
641 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo) | 691 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo) |
642 | { | 692 | { |
643 | if (bo->kptr == NULL) | 693 | if (bo->kmap.bo) |
644 | return; | 694 | ttm_bo_kunmap(&bo->kmap); |
645 | bo->kptr = NULL; | ||
646 | ttm_bo_kunmap(&bo->kmap); | ||
647 | } | 695 | } |
648 | 696 | ||
649 | struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) | 697 | struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo) |
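The kmap rework above stops tracking a separate bo->kptr and instead derives the kernel address from the cached TTM kmap object through the new amdgpu_bo_kptr(). A minimal usage sketch, assuming the BO is already reserved per the usual TTM rules; example_touch_bo and the memset are illustrative, not part of the patch:

static int example_touch_bo(struct amdgpu_bo *bo)
{
	void *ptr;
	int r;

	/* maps the BO, or hands back the mapping cached in bo->kmap */
	r = amdgpu_bo_kmap(bo, &ptr);
	if (r)
		return r;

	memset(ptr, 0, 8);

	/* other code can look up the same mapping without remapping */
	WARN_ON(amdgpu_bo_kptr(bo) != ptr);

	amdgpu_bo_kunmap(bo);
	return 0;
}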
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 833b172a2c2a..9b7b4fcb047b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | |||
@@ -33,6 +33,67 @@ | |||
33 | 33 | ||
34 | #define AMDGPU_BO_INVALID_OFFSET LONG_MAX | 34 | #define AMDGPU_BO_INVALID_OFFSET LONG_MAX |
35 | 35 | ||
36 | struct amdgpu_bo_va_mapping { | ||
37 | struct list_head list; | ||
38 | struct rb_node rb; | ||
39 | uint64_t start; | ||
40 | uint64_t last; | ||
41 | uint64_t __subtree_last; | ||
42 | uint64_t offset; | ||
43 | uint64_t flags; | ||
44 | }; | ||
45 | |||
46 | /* bo virtual addresses in a specific vm */ | ||
47 | struct amdgpu_bo_va { | ||
48 | /* protected by bo being reserved */ | ||
49 | struct list_head bo_list; | ||
50 | struct dma_fence *last_pt_update; | ||
51 | unsigned ref_count; | ||
52 | |||
53 | /* protected by vm mutex and spinlock */ | ||
54 | struct list_head vm_status; | ||
55 | |||
56 | /* mappings for this bo_va */ | ||
57 | struct list_head invalids; | ||
58 | struct list_head valids; | ||
59 | |||
60 | /* constant after initialization */ | ||
61 | struct amdgpu_vm *vm; | ||
62 | struct amdgpu_bo *bo; | ||
63 | }; | ||
64 | |||
65 | |||
66 | struct amdgpu_bo { | ||
67 | /* Protected by tbo.reserved */ | ||
68 | u32 preferred_domains; | ||
69 | u32 allowed_domains; | ||
70 | struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1]; | ||
71 | struct ttm_placement placement; | ||
72 | struct ttm_buffer_object tbo; | ||
73 | struct ttm_bo_kmap_obj kmap; | ||
74 | u64 flags; | ||
75 | unsigned pin_count; | ||
76 | u64 tiling_flags; | ||
77 | u64 metadata_flags; | ||
78 | void *metadata; | ||
79 | u32 metadata_size; | ||
80 | unsigned prime_shared_count; | ||
81 | /* list of all virtual addresses this bo is associated with */ | ||
82 | struct list_head va; | ||
83 | /* Constant after initialization */ | ||
84 | struct drm_gem_object gem_base; | ||
85 | struct amdgpu_bo *parent; | ||
86 | struct amdgpu_bo *shadow; | ||
87 | |||
88 | struct ttm_bo_kmap_obj dma_buf_vmap; | ||
89 | struct amdgpu_mn *mn; | ||
90 | |||
91 | union { | ||
92 | struct list_head mn_list; | ||
93 | struct list_head shadow_list; | ||
94 | }; | ||
95 | }; | ||
96 | |||
36 | /** | 97 | /** |
37 | * amdgpu_mem_type_to_domain - return domain corresponding to mem_type | 98 | * amdgpu_mem_type_to_domain - return domain corresponding to mem_type |
38 | * @mem_type: ttm memory type | 99 | * @mem_type: ttm memory type |
@@ -132,6 +193,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, | |||
132 | bool kernel, u32 domain, u64 flags, | 193 | bool kernel, u32 domain, u64 flags, |
133 | struct sg_table *sg, | 194 | struct sg_table *sg, |
134 | struct reservation_object *resv, | 195 | struct reservation_object *resv, |
196 | uint64_t init_value, | ||
135 | struct amdgpu_bo **bo_ptr); | 197 | struct amdgpu_bo **bo_ptr); |
136 | int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | 198 | int amdgpu_bo_create_restricted(struct amdgpu_device *adev, |
137 | unsigned long size, int byte_align, | 199 | unsigned long size, int byte_align, |
@@ -139,7 +201,12 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, | |||
139 | struct sg_table *sg, | 201 | struct sg_table *sg, |
140 | struct ttm_placement *placement, | 202 | struct ttm_placement *placement, |
141 | struct reservation_object *resv, | 203 | struct reservation_object *resv, |
204 | uint64_t init_value, | ||
142 | struct amdgpu_bo **bo_ptr); | 205 | struct amdgpu_bo **bo_ptr); |
206 | int amdgpu_bo_create_reserved(struct amdgpu_device *adev, | ||
207 | unsigned long size, int align, | ||
208 | u32 domain, struct amdgpu_bo **bo_ptr, | ||
209 | u64 *gpu_addr, void **cpu_addr); | ||
143 | int amdgpu_bo_create_kernel(struct amdgpu_device *adev, | 210 | int amdgpu_bo_create_kernel(struct amdgpu_device *adev, |
144 | unsigned long size, int align, | 211 | unsigned long size, int align, |
145 | u32 domain, struct amdgpu_bo **bo_ptr, | 212 | u32 domain, struct amdgpu_bo **bo_ptr, |
@@ -147,6 +214,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, | |||
147 | void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, | 214 | void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, |
148 | void **cpu_addr); | 215 | void **cpu_addr); |
149 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); | 216 | int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); |
217 | void *amdgpu_bo_kptr(struct amdgpu_bo *bo); | ||
150 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo); | 218 | void amdgpu_bo_kunmap(struct amdgpu_bo *bo); |
151 | struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); | 219 | struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo); |
152 | void amdgpu_bo_unref(struct amdgpu_bo **bo); | 220 | void amdgpu_bo_unref(struct amdgpu_bo **bo); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h index c19c4d138751..f21a7716b90e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h | |||
@@ -30,6 +30,7 @@ struct cg_flag_name | |||
30 | const char *name; | 30 | const char *name; |
31 | }; | 31 | }; |
32 | 32 | ||
33 | void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); | ||
33 | int amdgpu_pm_sysfs_init(struct amdgpu_device *adev); | 34 | int amdgpu_pm_sysfs_init(struct amdgpu_device *adev); |
34 | void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); | 35 | void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev); |
35 | void amdgpu_pm_print_power_states(struct amdgpu_device *adev); | 36 | void amdgpu_pm_print_power_states(struct amdgpu_device *adev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 6bdc866570ab..5b3f92891f89 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | |||
@@ -69,7 +69,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, | |||
69 | 69 | ||
70 | ww_mutex_lock(&resv->lock, NULL); | 70 | ww_mutex_lock(&resv->lock, NULL); |
71 | ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, | 71 | ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, |
72 | AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo); | 72 | AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo); |
73 | ww_mutex_unlock(&resv->lock); | 73 | ww_mutex_unlock(&resv->lock); |
74 | if (ret) | 74 | if (ret) |
75 | return ERR_PTR(ret); | 75 | return ERR_PTR(ret); |
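The prime import above is typical of this sweep: every amdgpu_bo_create() caller gains the new init_value argument, and nearly all pass 0. As the comment added in amdgpu_object.c notes, the value only takes effect when flags contains AMDGPU_GEM_CREATE_VRAM_CLEARED. A hedged sketch of a caller that would actually use it; the size and fill pattern are illustrative:

	struct amdgpu_bo *bo;
	int r;

	/* fill the fresh VRAM BO with a 64-bit pattern; without
	 * AMDGPU_GEM_CREATE_VRAM_CLEARED the init_value is ignored
	 */
	r = amdgpu_bo_create(adev, 4096, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_VRAM_CLEARED,
			     NULL, NULL, 0xdeadbeefdeadbeefULL, &bo);
	if (r)
		return r;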
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 15b7149d1204..6c5646b48d1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
@@ -184,47 +184,22 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, | |||
184 | return r; | 184 | return r; |
185 | } | 185 | } |
186 | 186 | ||
187 | if (ring->funcs->support_64bit_ptrs) { | 187 | r = amdgpu_wb_get(adev, &ring->rptr_offs); |
188 | r = amdgpu_wb_get_64bit(adev, &ring->rptr_offs); | 188 | if (r) { |
189 | if (r) { | 189 | dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); |
190 | dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); | 190 | return r; |
191 | return r; | ||
192 | } | ||
193 | |||
194 | r = amdgpu_wb_get_64bit(adev, &ring->wptr_offs); | ||
195 | if (r) { | ||
196 | dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); | ||
197 | return r; | ||
198 | } | ||
199 | |||
200 | } else { | ||
201 | r = amdgpu_wb_get(adev, &ring->rptr_offs); | ||
202 | if (r) { | ||
203 | dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r); | ||
204 | return r; | ||
205 | } | ||
206 | |||
207 | r = amdgpu_wb_get(adev, &ring->wptr_offs); | ||
208 | if (r) { | ||
209 | dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); | ||
210 | return r; | ||
211 | } | ||
212 | |||
213 | } | 191 | } |
214 | 192 | ||
215 | if (amdgpu_sriov_vf(adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) { | 193 | r = amdgpu_wb_get(adev, &ring->wptr_offs); |
216 | r = amdgpu_wb_get_256Bit(adev, &ring->fence_offs); | 194 | if (r) { |
217 | if (r) { | 195 | dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r); |
218 | dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); | 196 | return r; |
219 | return r; | 197 | } |
220 | } | ||
221 | 198 | ||
222 | } else { | 199 | r = amdgpu_wb_get(adev, &ring->fence_offs); |
223 | r = amdgpu_wb_get(adev, &ring->fence_offs); | 200 | if (r) { |
224 | if (r) { | 201 | dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); |
225 | dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r); | 202 | return r; |
226 | return r; | ||
227 | } | ||
228 | } | 203 | } |
229 | 204 | ||
230 | r = amdgpu_wb_get(adev, &ring->cond_exe_offs); | 205 | r = amdgpu_wb_get(adev, &ring->cond_exe_offs); |
@@ -286,19 +261,15 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) | |||
286 | { | 261 | { |
287 | ring->ready = false; | 262 | ring->ready = false; |
288 | 263 | ||
289 | if (ring->funcs->support_64bit_ptrs) { | 264 | /* Don't finish a ring that was never initialized */
290 | amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs); | 265 | if (!(ring->adev) || !(ring->adev->rings[ring->idx])) |
291 | amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs); | 266 | return; |
292 | } else { | 267 | |
293 | amdgpu_wb_free(ring->adev, ring->rptr_offs); | 268 | amdgpu_wb_free(ring->adev, ring->rptr_offs); |
294 | amdgpu_wb_free(ring->adev, ring->wptr_offs); | 269 | amdgpu_wb_free(ring->adev, ring->wptr_offs); |
295 | } | ||
296 | 270 | ||
297 | amdgpu_wb_free(ring->adev, ring->cond_exe_offs); | 271 | amdgpu_wb_free(ring->adev, ring->cond_exe_offs); |
298 | if (amdgpu_sriov_vf(ring->adev) && ring->funcs->type == AMDGPU_RING_TYPE_GFX) | 272 | amdgpu_wb_free(ring->adev, ring->fence_offs); |
299 | amdgpu_wb_free_256bit(ring->adev, ring->fence_offs); | ||
300 | else | ||
301 | amdgpu_wb_free(ring->adev, ring->fence_offs); | ||
302 | 273 | ||
303 | amdgpu_bo_free_kernel(&ring->ring_obj, | 274 | amdgpu_bo_free_kernel(&ring->ring_obj, |
304 | &ring->gpu_addr, | 275 | &ring->gpu_addr, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 5ca75a456ad2..3144400435b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
@@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev, | |||
64 | INIT_LIST_HEAD(&sa_manager->flist[i]); | 64 | INIT_LIST_HEAD(&sa_manager->flist[i]); |
65 | 65 | ||
66 | r = amdgpu_bo_create(adev, size, align, true, domain, | 66 | r = amdgpu_bo_create(adev, size, align, true, domain, |
67 | 0, NULL, NULL, &sa_manager->bo); | 67 | 0, NULL, NULL, 0, &sa_manager->bo); |
68 | if (r) { | 68 | if (r) { |
69 | dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); | 69 | dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); |
70 | return r; | 70 | return r; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 3c4d7574d704..ed8c3739015b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | |||
@@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) | |||
61 | 61 | ||
62 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, | 62 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, |
63 | AMDGPU_GEM_DOMAIN_VRAM, 0, | 63 | AMDGPU_GEM_DOMAIN_VRAM, 0, |
64 | NULL, NULL, &vram_obj); | 64 | NULL, NULL, 0, &vram_obj); |
65 | if (r) { | 65 | if (r) { |
66 | DRM_ERROR("Failed to create VRAM object\n"); | 66 | DRM_ERROR("Failed to create VRAM object\n"); |
67 | goto out_cleanup; | 67 | goto out_cleanup; |
@@ -82,7 +82,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) | |||
82 | 82 | ||
83 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, | 83 | r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, |
84 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, | 84 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, |
85 | NULL, gtt_obj + i); | 85 | NULL, 0, gtt_obj + i); |
86 | if (r) { | 86 | if (r) { |
87 | DRM_ERROR("Failed to create GTT object %d\n", i); | 87 | DRM_ERROR("Failed to create GTT object %d\n", i); |
88 | goto out_lclean; | 88 | goto out_lclean; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 509f7a63d40c..9ab58245e518 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | |||
@@ -105,12 +105,12 @@ TRACE_EVENT(amdgpu_bo_create, | |||
105 | __entry->bo = bo; | 105 | __entry->bo = bo; |
106 | __entry->pages = bo->tbo.num_pages; | 106 | __entry->pages = bo->tbo.num_pages; |
107 | __entry->type = bo->tbo.mem.mem_type; | 107 | __entry->type = bo->tbo.mem.mem_type; |
108 | __entry->prefer = bo->prefered_domains; | 108 | __entry->prefer = bo->preferred_domains; |
109 | __entry->allow = bo->allowed_domains; | 109 | __entry->allow = bo->allowed_domains; |
110 | __entry->visible = bo->flags; | 110 | __entry->visible = bo->flags; |
111 | ), | 111 | ), |
112 | 112 | ||
113 | TP_printk("bo=%p, pages=%u, type=%d, prefered=%d, allowed=%d, visible=%d", | 113 | TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d", |
114 | __entry->bo, __entry->pages, __entry->type, | 114 | __entry->bo, __entry->pages, __entry->type, |
115 | __entry->prefer, __entry->allow, __entry->visible) | 115 | __entry->prefer, __entry->allow, __entry->visible) |
116 | ); | 116 | ); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e6f9a54c959d..c803b082324d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -753,7 +753,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, | |||
753 | struct ttm_mem_reg *bo_mem) | 753 | struct ttm_mem_reg *bo_mem) |
754 | { | 754 | { |
755 | struct amdgpu_ttm_tt *gtt = (void*)ttm; | 755 | struct amdgpu_ttm_tt *gtt = (void*)ttm; |
756 | int r; | 756 | int r = 0; |
757 | 757 | ||
758 | if (gtt->userptr) { | 758 | if (gtt->userptr) { |
759 | r = amdgpu_ttm_tt_pin_userptr(ttm); | 759 | r = amdgpu_ttm_tt_pin_userptr(ttm); |
@@ -1232,23 +1232,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) | |||
1232 | /* Change the size here instead of the init above so only lpfn is affected */ | 1232 | /* Change the size here instead of the init above so only lpfn is affected */ |
1233 | amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); | 1233 | amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size); |
1234 | 1234 | ||
1235 | r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true, | 1235 | r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE, |
1236 | AMDGPU_GEM_DOMAIN_VRAM, | 1236 | AMDGPU_GEM_DOMAIN_VRAM, |
1237 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 1237 | &adev->stolen_vga_memory, |
1238 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 1238 | NULL, NULL); |
1239 | NULL, NULL, &adev->stollen_vga_memory); | ||
1240 | if (r) { | ||
1241 | return r; | ||
1242 | } | ||
1243 | r = amdgpu_bo_reserve(adev->stollen_vga_memory, false); | ||
1244 | if (r) | 1239 | if (r) |
1245 | return r; | 1240 | return r; |
1246 | r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL); | ||
1247 | amdgpu_bo_unreserve(adev->stollen_vga_memory); | ||
1248 | if (r) { | ||
1249 | amdgpu_bo_unref(&adev->stollen_vga_memory); | ||
1250 | return r; | ||
1251 | } | ||
1252 | DRM_INFO("amdgpu: %uM of VRAM memory ready\n", | 1241 | DRM_INFO("amdgpu: %uM of VRAM memory ready\n", |
1253 | (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); | 1242 | (unsigned) (adev->mc.real_vram_size / (1024 * 1024))); |
1254 | 1243 | ||
@@ -1319,13 +1308,13 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) | |||
1319 | if (!adev->mman.initialized) | 1308 | if (!adev->mman.initialized) |
1320 | return; | 1309 | return; |
1321 | amdgpu_ttm_debugfs_fini(adev); | 1310 | amdgpu_ttm_debugfs_fini(adev); |
1322 | if (adev->stollen_vga_memory) { | 1311 | if (adev->stolen_vga_memory) { |
1323 | r = amdgpu_bo_reserve(adev->stollen_vga_memory, true); | 1312 | r = amdgpu_bo_reserve(adev->stolen_vga_memory, true); |
1324 | if (r == 0) { | 1313 | if (r == 0) { |
1325 | amdgpu_bo_unpin(adev->stollen_vga_memory); | 1314 | amdgpu_bo_unpin(adev->stolen_vga_memory); |
1326 | amdgpu_bo_unreserve(adev->stollen_vga_memory); | 1315 | amdgpu_bo_unreserve(adev->stolen_vga_memory); |
1327 | } | 1316 | } |
1328 | amdgpu_bo_unref(&adev->stollen_vga_memory); | 1317 | amdgpu_bo_unref(&adev->stolen_vga_memory); |
1329 | } | 1318 | } |
1330 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); | 1319 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM); |
1331 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); | 1320 | ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT); |
@@ -1509,11 +1498,12 @@ error_free: | |||
1509 | } | 1498 | } |
1510 | 1499 | ||
1511 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, | 1500 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
1512 | uint32_t src_data, | 1501 | uint64_t src_data, |
1513 | struct reservation_object *resv, | 1502 | struct reservation_object *resv, |
1514 | struct dma_fence **fence) | 1503 | struct dma_fence **fence) |
1515 | { | 1504 | { |
1516 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); | 1505 | struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); |
1506 | /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL */ | ||
1517 | uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; | 1507 | uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; |
1518 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; | 1508 | struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; |
1519 | 1509 | ||
@@ -1545,7 +1535,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, | |||
1545 | num_pages -= mm_node->size; | 1535 | num_pages -= mm_node->size; |
1546 | ++mm_node; | 1536 | ++mm_node; |
1547 | } | 1537 | } |
1548 | num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; | 1538 | |
1539 | /* 10 double words for each SDMA_OP_PTEPDE cmd */ | ||
1540 | num_dw = num_loops * 10; | ||
1549 | 1541 | ||
1550 | /* for IB padding */ | 1542 | /* for IB padding */ |
1551 | num_dw += 64; | 1543 | num_dw += 64; |
@@ -1570,12 +1562,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, | |||
1570 | uint32_t byte_count = mm_node->size << PAGE_SHIFT; | 1562 | uint32_t byte_count = mm_node->size << PAGE_SHIFT; |
1571 | uint64_t dst_addr; | 1563 | uint64_t dst_addr; |
1572 | 1564 | ||
1565 | WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8"); | ||
1566 | |||
1573 | dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); | 1567 | dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem); |
1574 | while (byte_count) { | 1568 | while (byte_count) { |
1575 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); | 1569 | uint32_t cur_size_in_bytes = min(byte_count, max_bytes); |
1576 | 1570 | ||
1577 | amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, | 1571 | amdgpu_vm_set_pte_pde(adev, &job->ibs[0], |
1578 | dst_addr, cur_size_in_bytes); | 1572 | dst_addr, 0, |
1573 | cur_size_in_bytes >> 3, 0, | ||
1574 | src_data); | ||
1579 | 1575 | ||
1580 | dst_addr += cur_size_in_bytes; | 1576 | dst_addr += cur_size_in_bytes; |
1581 | byte_count -= cur_size_in_bytes; | 1577 | byte_count -= cur_size_in_bytes; |
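The fill path above switches from CONST_FILL to SDMA_OP_PTEPDE packets so a full 64-bit src_data pattern can be written: each packet covers cur_size_in_bytes >> 3 eight-byte entries (hence the new multiple-of-8 warning) and is budgeted at 10 dwords. Illustrative arithmetic; the max_bytes value here is an assumption, the real one comes from adev->mman.buffer_funcs->fill_max_bytes:

	uint32_t byte_count = 1 << 20;		/* fill 1 MiB */
	uint32_t max_bytes = 2 << 20;		/* assumed per-packet limit */
	uint32_t num_loops = DIV_ROUND_UP(byte_count, max_bytes);  /* = 1 */
	uint32_t num_dw = num_loops * 10 + 64;	/* packets + IB padding */
	uint32_t entries = byte_count >> 3;	/* = 131072 8-byte writes */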
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index f137c2458ee8..0e2399f32de7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | |||
@@ -73,7 +73,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, | |||
73 | struct dma_fence **fence, bool direct_submit, | 73 | struct dma_fence **fence, bool direct_submit, |
74 | bool vm_needs_flush); | 74 | bool vm_needs_flush); |
75 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, | 75 | int amdgpu_fill_buffer(struct amdgpu_bo *bo, |
76 | uint32_t src_data, | 76 | uint64_t src_data, |
77 | struct reservation_object *resv, | 77 | struct reservation_object *resv, |
78 | struct dma_fence **fence); | 78 | struct dma_fence **fence); |
79 | 79 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index fcfb9d4f7477..36c763310df5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | |||
@@ -358,8 +358,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, | |||
358 | (le32_to_cpu(header->jt_offset) * 4); | 358 | (le32_to_cpu(header->jt_offset) * 4); |
359 | memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4); | 359 | memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4); |
360 | 360 | ||
361 | ucode->ucode_size += le32_to_cpu(header->jt_size) * 4; | ||
362 | |||
363 | return 0; | 361 | return 0; |
364 | } | 362 | } |
365 | 363 | ||
@@ -381,7 +379,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) | |||
381 | err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, | 379 | err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true, |
382 | amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, | 380 | amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, |
383 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 381 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, |
384 | NULL, NULL, bo); | 382 | NULL, NULL, 0, bo); |
385 | if (err) { | 383 | if (err) { |
386 | dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); | 384 | dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err); |
387 | goto failed; | 385 | goto failed; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 2ca09f111f08..aefecf6c1e7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
@@ -1051,7 +1051,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
1051 | AMDGPU_GEM_DOMAIN_VRAM, | 1051 | AMDGPU_GEM_DOMAIN_VRAM, |
1052 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 1052 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1053 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 1053 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, |
1054 | NULL, NULL, &bo); | 1054 | NULL, NULL, 0, &bo); |
1055 | if (r) | 1055 | if (r) |
1056 | return r; | 1056 | return r; |
1057 | 1057 | ||
@@ -1101,7 +1101,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, | |||
1101 | AMDGPU_GEM_DOMAIN_VRAM, | 1101 | AMDGPU_GEM_DOMAIN_VRAM, |
1102 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 1102 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
1103 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 1103 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, |
1104 | NULL, NULL, &bo); | 1104 | NULL, NULL, 0, &bo); |
1105 | if (r) | 1105 | if (r) |
1106 | return r; | 1106 | return r; |
1107 | 1107 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index b692ad402252..c855366521ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | |||
@@ -937,9 +937,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) | |||
937 | unsigned i; | 937 | unsigned i; |
938 | int r, timeout = adev->usec_timeout; | 938 | int r, timeout = adev->usec_timeout; |
939 | 939 | ||
940 | /* workaround VCE ring test slow issue for sriov*/ | 940 | /* skip ring test for sriov */ |
941 | if (amdgpu_sriov_vf(adev)) | 941 | if (amdgpu_sriov_vf(adev)) |
942 | timeout *= 10; | 942 | return 0; |
943 | 943 | ||
944 | r = amdgpu_ring_alloc(ring, 16); | 944 | r = amdgpu_ring_alloc(ring, 16); |
945 | if (r) { | 945 | if (r) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 09190fadd228..041e0121590c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | |||
@@ -209,9 +209,9 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) | |||
209 | 209 | ||
210 | if (fences == 0) { | 210 | if (fences == 0) { |
211 | if (adev->pm.dpm_enabled) { | 211 | if (adev->pm.dpm_enabled) { |
212 | /* might be used with pg/cg | ||
212 | amdgpu_dpm_enable_uvd(adev, false); | 213 | amdgpu_dpm_enable_uvd(adev, false); |
213 | } else { | 214 | */ |
214 | amdgpu_asic_set_uvd_clocks(adev, 0, 0); | ||
215 | } | 215 | } |
216 | } else { | 216 | } else { |
217 | schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); | 217 | schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); |
@@ -223,12 +223,10 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) | |||
223 | struct amdgpu_device *adev = ring->adev; | 223 | struct amdgpu_device *adev = ring->adev; |
224 | bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); | 224 | bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); |
225 | 225 | ||
226 | if (set_clocks) { | 226 | if (set_clocks && adev->pm.dpm_enabled) { |
227 | if (adev->pm.dpm_enabled) { | 227 | /* might be used with pg/cg
228 | amdgpu_dpm_enable_uvd(adev, true); | 228 | amdgpu_dpm_enable_uvd(adev, true); |
229 | } else { | 229 | */ |
230 | amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); | ||
231 | } | ||
232 | } | 230 | } |
233 | } | 231 | } |
234 | 232 | ||
@@ -361,7 +359,7 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand | |||
361 | AMDGPU_GEM_DOMAIN_VRAM, | 359 | AMDGPU_GEM_DOMAIN_VRAM, |
362 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 360 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
363 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 361 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, |
364 | NULL, NULL, &bo); | 362 | NULL, NULL, 0, &bo); |
365 | if (r) | 363 | if (r) |
366 | return r; | 364 | return r; |
367 | 365 | ||
@@ -413,7 +411,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han | |||
413 | AMDGPU_GEM_DOMAIN_VRAM, | 411 | AMDGPU_GEM_DOMAIN_VRAM, |
414 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 412 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | |
415 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 413 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, |
416 | NULL, NULL, &bo); | 414 | NULL, NULL, 0, &bo); |
417 | if (r) | 415 | if (r) |
418 | return r; | 416 | return r; |
419 | 417 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 250c8e80e646..9ce36652029e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -288,6 +288,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, | |||
288 | unsigned pt_idx, from, to; | 288 | unsigned pt_idx, from, to; |
289 | int r; | 289 | int r; |
290 | u64 flags; | 290 | u64 flags; |
291 | uint64_t init_value = 0; | ||
291 | 292 | ||
292 | if (!parent->entries) { | 293 | if (!parent->entries) { |
293 | unsigned num_entries = amdgpu_vm_num_entries(adev, level); | 294 | unsigned num_entries = amdgpu_vm_num_entries(adev, level); |
@@ -321,6 +322,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, | |||
321 | flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | | 322 | flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS | |
322 | AMDGPU_GEM_CREATE_SHADOW); | 323 | AMDGPU_GEM_CREATE_SHADOW); |
323 | 324 | ||
325 | if (vm->pte_support_ats) { | ||
326 | init_value = AMDGPU_PTE_SYSTEM; | ||
327 | if (level != adev->vm_manager.num_level - 1) | ||
328 | init_value |= AMDGPU_PDE_PTE; | ||
329 | } | ||
330 | |||
324 | /* walk over the address space and allocate the page tables */ | 331 | /* walk over the address space and allocate the page tables */ |
325 | for (pt_idx = from; pt_idx <= to; ++pt_idx) { | 332 | for (pt_idx = from; pt_idx <= to; ++pt_idx) { |
326 | struct reservation_object *resv = vm->root.bo->tbo.resv; | 333 | struct reservation_object *resv = vm->root.bo->tbo.resv; |
@@ -333,7 +340,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, | |||
333 | AMDGPU_GPU_PAGE_SIZE, true, | 340 | AMDGPU_GPU_PAGE_SIZE, true, |
334 | AMDGPU_GEM_DOMAIN_VRAM, | 341 | AMDGPU_GEM_DOMAIN_VRAM, |
335 | flags, | 342 | flags, |
336 | NULL, resv, &pt); | 343 | NULL, resv, init_value, &pt); |
337 | if (r) | 344 | if (r) |
338 | return r; | 345 | return r; |
339 | 346 | ||
@@ -1060,7 +1067,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev, | |||
1060 | shadow = parent->bo->shadow; | 1067 | shadow = parent->bo->shadow; |
1061 | 1068 | ||
1062 | if (vm->use_cpu_for_update) { | 1069 | if (vm->use_cpu_for_update) { |
1063 | pd_addr = (unsigned long)parent->bo->kptr; | 1070 | pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo); |
1064 | r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); | 1071 | r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); |
1065 | if (unlikely(r)) | 1072 | if (unlikely(r)) |
1066 | return r; | 1073 | return r; |
@@ -1401,7 +1408,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params, | |||
1401 | 1408 | ||
1402 | pt = entry->bo; | 1409 | pt = entry->bo; |
1403 | if (use_cpu_update) { | 1410 | if (use_cpu_update) { |
1404 | pe_start = (unsigned long)pt->kptr; | 1411 | pe_start = (unsigned long)amdgpu_bo_kptr(pt); |
1405 | } else { | 1412 | } else { |
1406 | if (pt->shadow) { | 1413 | if (pt->shadow) { |
1407 | pe_start = amdgpu_bo_gpu_offset(pt->shadow); | 1414 | pe_start = amdgpu_bo_gpu_offset(pt->shadow); |
@@ -1995,15 +2002,19 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, | |||
1995 | struct amdgpu_bo_va_mapping *mapping; | 2002 | struct amdgpu_bo_va_mapping *mapping; |
1996 | struct dma_fence *f = NULL; | 2003 | struct dma_fence *f = NULL; |
1997 | int r; | 2004 | int r; |
2005 | uint64_t init_pte_value = 0; | ||
1998 | 2006 | ||
1999 | while (!list_empty(&vm->freed)) { | 2007 | while (!list_empty(&vm->freed)) { |
2000 | mapping = list_first_entry(&vm->freed, | 2008 | mapping = list_first_entry(&vm->freed, |
2001 | struct amdgpu_bo_va_mapping, list); | 2009 | struct amdgpu_bo_va_mapping, list); |
2002 | list_del(&mapping->list); | 2010 | list_del(&mapping->list); |
2003 | 2011 | ||
2012 | if (vm->pte_support_ats) | ||
2013 | init_pte_value = AMDGPU_PTE_SYSTEM; | ||
2014 | |||
2004 | r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm, | 2015 | r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm, |
2005 | mapping->start, mapping->last, | 2016 | mapping->start, mapping->last, |
2006 | 0, 0, &f); | 2017 | init_pte_value, 0, &f); |
2007 | amdgpu_vm_free_mapping(adev, vm, mapping, f); | 2018 | amdgpu_vm_free_mapping(adev, vm, mapping, f); |
2008 | if (r) { | 2019 | if (r) { |
2009 | dma_fence_put(f); | 2020 | dma_fence_put(f); |
@@ -2494,6 +2505,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, | |||
2494 | struct amd_sched_rq *rq; | 2505 | struct amd_sched_rq *rq; |
2495 | int r, i; | 2506 | int r, i; |
2496 | u64 flags; | 2507 | u64 flags; |
2508 | uint64_t init_pde_value = 0; | ||
2497 | 2509 | ||
2498 | vm->va = RB_ROOT; | 2510 | vm->va = RB_ROOT; |
2499 | vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); | 2511 | vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter); |
@@ -2515,10 +2527,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, | |||
2515 | if (r) | 2527 | if (r) |
2516 | return r; | 2528 | return r; |
2517 | 2529 | ||
2518 | if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) | 2530 | vm->pte_support_ats = false; |
2531 | |||
2532 | if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { | ||
2519 | vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & | 2533 | vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & |
2520 | AMDGPU_VM_USE_CPU_FOR_COMPUTE); | 2534 | AMDGPU_VM_USE_CPU_FOR_COMPUTE); |
2521 | else | 2535 | |
2536 | if (adev->asic_type == CHIP_RAVEN) { | ||
2537 | vm->pte_support_ats = true; | ||
2538 | init_pde_value = AMDGPU_PTE_SYSTEM | AMDGPU_PDE_PTE; | ||
2539 | } | ||
2540 | } else | ||
2522 | vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & | 2541 | vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & |
2523 | AMDGPU_VM_USE_CPU_FOR_GFX); | 2542 | AMDGPU_VM_USE_CPU_FOR_GFX); |
2524 | DRM_DEBUG_DRIVER("VM update mode is %s\n", | 2543 | DRM_DEBUG_DRIVER("VM update mode is %s\n", |
@@ -2538,7 +2557,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, | |||
2538 | r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, | 2557 | r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true, |
2539 | AMDGPU_GEM_DOMAIN_VRAM, | 2558 | AMDGPU_GEM_DOMAIN_VRAM, |
2540 | flags, | 2559 | flags, |
2541 | NULL, NULL, &vm->root.bo); | 2560 | NULL, NULL, init_pde_value, &vm->root.bo); |
2542 | if (r) | 2561 | if (r) |
2543 | goto error_free_sched_entity; | 2562 | goto error_free_sched_entity; |
2544 | 2563 | ||
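On Raven the GPU can service misses through ATS, so page tables that used to be zero-filled are now seeded with AMDGPU_PTE_SYSTEM, with AMDGPU_PDE_PTE OR'd in for non-leaf directory levels. A condensed sketch of the selection logic spread across the hunks above; example_init_value is a hypothetical helper, not something the patch adds:

static uint64_t example_init_value(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm, unsigned level)
{
	uint64_t v = 0;

	if (vm->pte_support_ats) {
		v = AMDGPU_PTE_SYSTEM;
		/* leaf page tables sit at num_level - 1 */
		if (level != adev->vm_manager.num_level - 1)
			v |= AMDGPU_PDE_PTE;
	}
	return v;
}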
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 34d9174ebff2..217ecba8f4cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | |||
@@ -146,6 +146,9 @@ struct amdgpu_vm { | |||
146 | 146 | ||
147 | /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ | 147 | /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ |
148 | bool use_cpu_for_update; | 148 | bool use_cpu_for_update; |
149 | |||
150 | /* Flag to indicate ATS support in the page tables on GFX9 */ | ||
151 | bool pte_support_ats; | ||
149 | }; | 152 | }; |
150 | 153 | ||
151 | struct amdgpu_vm_id { | 154 | struct amdgpu_vm_id { |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 490e84944851..4e519dc42916 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -2431,7 +2431,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2431 | aobj = gem_to_amdgpu_bo(obj); | 2431 | aobj = gem_to_amdgpu_bo(obj); |
2432 | ret = amdgpu_bo_reserve(aobj, false); | 2432 | ret = amdgpu_bo_reserve(aobj, false); |
2433 | if (ret != 0) { | 2433 | if (ret != 0) { |
2434 | drm_gem_object_unreference_unlocked(obj); | 2434 | drm_gem_object_put_unlocked(obj); |
2435 | return ret; | 2435 | return ret; |
2436 | } | 2436 | } |
2437 | 2437 | ||
@@ -2439,7 +2439,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2439 | amdgpu_bo_unreserve(aobj); | 2439 | amdgpu_bo_unreserve(aobj); |
2440 | if (ret) { | 2440 | if (ret) { |
2441 | DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); | 2441 | DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); |
2442 | drm_gem_object_unreference_unlocked(obj); | 2442 | drm_gem_object_put_unlocked(obj); |
2443 | return ret; | 2443 | return ret; |
2444 | } | 2444 | } |
2445 | 2445 | ||
@@ -2473,7 +2473,7 @@ unpin: | |||
2473 | amdgpu_bo_unpin(aobj); | 2473 | amdgpu_bo_unpin(aobj); |
2474 | amdgpu_bo_unreserve(aobj); | 2474 | amdgpu_bo_unreserve(aobj); |
2475 | } | 2475 | } |
2476 | drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); | 2476 | drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); |
2477 | } | 2477 | } |
2478 | 2478 | ||
2479 | amdgpu_crtc->cursor_bo = obj; | 2479 | amdgpu_crtc->cursor_bo = obj; |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 921c6f772f11..11edc75edaa9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -2506,7 +2506,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2506 | aobj = gem_to_amdgpu_bo(obj); | 2506 | aobj = gem_to_amdgpu_bo(obj); |
2507 | ret = amdgpu_bo_reserve(aobj, false); | 2507 | ret = amdgpu_bo_reserve(aobj, false); |
2508 | if (ret != 0) { | 2508 | if (ret != 0) { |
2509 | drm_gem_object_unreference_unlocked(obj); | 2509 | drm_gem_object_put_unlocked(obj); |
2510 | return ret; | 2510 | return ret; |
2511 | } | 2511 | } |
2512 | 2512 | ||
@@ -2514,7 +2514,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2514 | amdgpu_bo_unreserve(aobj); | 2514 | amdgpu_bo_unreserve(aobj); |
2515 | if (ret) { | 2515 | if (ret) { |
2516 | DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); | 2516 | DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); |
2517 | drm_gem_object_unreference_unlocked(obj); | 2517 | drm_gem_object_put_unlocked(obj); |
2518 | return ret; | 2518 | return ret; |
2519 | } | 2519 | } |
2520 | 2520 | ||
@@ -2548,7 +2548,7 @@ unpin: | |||
2548 | amdgpu_bo_unpin(aobj); | 2548 | amdgpu_bo_unpin(aobj); |
2549 | amdgpu_bo_unreserve(aobj); | 2549 | amdgpu_bo_unreserve(aobj); |
2550 | } | 2550 | } |
2551 | drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); | 2551 | drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); |
2552 | } | 2552 | } |
2553 | 2553 | ||
2554 | amdgpu_crtc->cursor_bo = obj; | 2554 | amdgpu_crtc->cursor_bo = obj; |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index bcd9521237f4..a51e35f824a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include "dce/dce_6_0_d.h" | 42 | #include "dce/dce_6_0_d.h" |
43 | #include "dce/dce_6_0_sh_mask.h" | 43 | #include "dce/dce_6_0_sh_mask.h" |
44 | #include "gca/gfx_7_2_enum.h" | 44 | #include "gca/gfx_7_2_enum.h" |
45 | #include "dce_v6_0.h" | ||
45 | #include "si_enums.h" | 46 | #include "si_enums.h" |
46 | 47 | ||
47 | static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev); | 48 | static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev); |
@@ -2321,7 +2322,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2321 | aobj = gem_to_amdgpu_bo(obj); | 2322 | aobj = gem_to_amdgpu_bo(obj); |
2322 | ret = amdgpu_bo_reserve(aobj, false); | 2323 | ret = amdgpu_bo_reserve(aobj, false); |
2323 | if (ret != 0) { | 2324 | if (ret != 0) { |
2324 | drm_gem_object_unreference_unlocked(obj); | 2325 | drm_gem_object_put_unlocked(obj); |
2325 | return ret; | 2326 | return ret; |
2326 | } | 2327 | } |
2327 | 2328 | ||
@@ -2329,7 +2330,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2329 | amdgpu_bo_unreserve(aobj); | 2330 | amdgpu_bo_unreserve(aobj); |
2330 | if (ret) { | 2331 | if (ret) { |
2331 | DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); | 2332 | DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); |
2332 | drm_gem_object_unreference_unlocked(obj); | 2333 | drm_gem_object_put_unlocked(obj); |
2333 | return ret; | 2334 | return ret; |
2334 | } | 2335 | } |
2335 | 2336 | ||
@@ -2363,7 +2364,7 @@ unpin: | |||
2363 | amdgpu_bo_unpin(aobj); | 2364 | amdgpu_bo_unpin(aobj); |
2364 | amdgpu_bo_unreserve(aobj); | 2365 | amdgpu_bo_unreserve(aobj); |
2365 | } | 2366 | } |
2366 | drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); | 2367 | drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); |
2367 | } | 2368 | } |
2368 | 2369 | ||
2369 | amdgpu_crtc->cursor_bo = obj; | 2370 | amdgpu_crtc->cursor_bo = obj; |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 609438fe8584..9cf14b8b2db9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | |||
@@ -2335,7 +2335,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2335 | aobj = gem_to_amdgpu_bo(obj); | 2335 | aobj = gem_to_amdgpu_bo(obj); |
2336 | ret = amdgpu_bo_reserve(aobj, false); | 2336 | ret = amdgpu_bo_reserve(aobj, false); |
2337 | if (ret != 0) { | 2337 | if (ret != 0) { |
2338 | drm_gem_object_unreference_unlocked(obj); | 2338 | drm_gem_object_put_unlocked(obj); |
2339 | return ret; | 2339 | return ret; |
2340 | } | 2340 | } |
2341 | 2341 | ||
@@ -2343,7 +2343,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc, | |||
2343 | amdgpu_bo_unreserve(aobj); | 2343 | amdgpu_bo_unreserve(aobj); |
2344 | if (ret) { | 2344 | if (ret) { |
2345 | DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); | 2345 | DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); |
2346 | drm_gem_object_unreference_unlocked(obj); | 2346 | drm_gem_object_put_unlocked(obj); |
2347 | return ret; | 2347 | return ret; |
2348 | } | 2348 | } |
2349 | 2349 | ||
@@ -2377,7 +2377,7 @@ unpin: | |||
2377 | amdgpu_bo_unpin(aobj); | 2377 | amdgpu_bo_unpin(aobj); |
2378 | amdgpu_bo_unreserve(aobj); | 2378 | amdgpu_bo_unreserve(aobj); |
2379 | } | 2379 | } |
2380 | drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo); | 2380 | drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo); |
2381 | } | 2381 | } |
2382 | 2382 | ||
2383 | amdgpu_crtc->cursor_bo = obj; | 2383 | amdgpu_crtc->cursor_bo = obj; |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 5ed919e45351..b9ee9073cb0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c | |||
@@ -479,6 +479,8 @@ static int dce_virtual_hw_init(void *handle) | |||
479 | #endif | 479 | #endif |
480 | /* no DCE */ | 480 | /* no DCE */ |
481 | break; | 481 | break; |
482 | case CHIP_VEGA10: | ||
483 | break; | ||
482 | default: | 484 | default: |
483 | DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); | 485 | DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); |
484 | } | 486 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index 4ac85f47f287..d228f5a99044 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | |||
@@ -2217,40 +2217,9 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
2217 | 2217 | ||
2218 | static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) | 2218 | static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev) |
2219 | { | 2219 | { |
2220 | int r; | 2220 | amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); |
2221 | 2221 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); | |
2222 | if (adev->gfx.rlc.save_restore_obj) { | 2222 | amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); |
2223 | r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true); | ||
2224 | if (unlikely(r != 0)) | ||
2225 | dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); | ||
2226 | amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); | ||
2227 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | ||
2228 | |||
2229 | amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj); | ||
2230 | adev->gfx.rlc.save_restore_obj = NULL; | ||
2231 | } | ||
2232 | |||
2233 | if (adev->gfx.rlc.clear_state_obj) { | ||
2234 | r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); | ||
2235 | if (unlikely(r != 0)) | ||
2236 | dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); | ||
2237 | amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); | ||
2238 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
2239 | |||
2240 | amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); | ||
2241 | adev->gfx.rlc.clear_state_obj = NULL; | ||
2242 | } | ||
2243 | |||
2244 | if (adev->gfx.rlc.cp_table_obj) { | ||
2245 | r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); | ||
2246 | if (unlikely(r != 0)) | ||
2247 | dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); | ||
2248 | amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); | ||
2249 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
2250 | |||
2251 | amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); | ||
2252 | adev->gfx.rlc.cp_table_obj = NULL; | ||
2253 | } | ||
2254 | } | 2223 | } |
2255 | 2224 | ||
2256 | static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) | 2225 | static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) |
@@ -2273,43 +2242,23 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) | |||
2273 | 2242 | ||
2274 | if (src_ptr) { | 2243 | if (src_ptr) { |
2275 | /* save restore block */ | 2244 | /* save restore block */ |
2276 | if (adev->gfx.rlc.save_restore_obj == NULL) { | 2245 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, |
2277 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 2246 | AMDGPU_GEM_DOMAIN_VRAM, |
2278 | AMDGPU_GEM_DOMAIN_VRAM, | 2247 | &adev->gfx.rlc.save_restore_obj, |
2279 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 2248 | &adev->gfx.rlc.save_restore_gpu_addr, |
2280 | NULL, NULL, | 2249 | (void **)&adev->gfx.rlc.sr_ptr); |
2281 | &adev->gfx.rlc.save_restore_obj); | ||
2282 | |||
2283 | if (r) { | ||
2284 | dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); | ||
2285 | return r; | ||
2286 | } | ||
2287 | } | ||
2288 | |||
2289 | r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); | ||
2290 | if (unlikely(r != 0)) { | ||
2291 | gfx_v6_0_rlc_fini(adev); | ||
2292 | return r; | ||
2293 | } | ||
2294 | r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM, | ||
2295 | &adev->gfx.rlc.save_restore_gpu_addr); | ||
2296 | if (r) { | 2250 | if (r) { |
2297 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | 2251 | dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", |
2298 | dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); | 2252 | r); |
2299 | gfx_v6_0_rlc_fini(adev); | 2253 | gfx_v6_0_rlc_fini(adev); |
2300 | return r; | 2254 | return r; |
2301 | } | 2255 | } |
2302 | 2256 | ||
2303 | r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr); | ||
2304 | if (r) { | ||
2305 | dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r); | ||
2306 | gfx_v6_0_rlc_fini(adev); | ||
2307 | return r; | ||
2308 | } | ||
2309 | /* write the sr buffer */ | 2257 | /* write the sr buffer */ |
2310 | dst_ptr = adev->gfx.rlc.sr_ptr; | 2258 | dst_ptr = adev->gfx.rlc.sr_ptr; |
2311 | for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) | 2259 | for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) |
2312 | dst_ptr[i] = cpu_to_le32(src_ptr[i]); | 2260 | dst_ptr[i] = cpu_to_le32(src_ptr[i]); |
2261 | |||
2313 | amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); | 2262 | amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj); |
2314 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | 2263 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); |
2315 | } | 2264 | } |
@@ -2319,39 +2268,17 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) | |||
2319 | adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev); | 2268 | adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev); |
2320 | dws = adev->gfx.rlc.clear_state_size + (256 / 4); | 2269 | dws = adev->gfx.rlc.clear_state_size + (256 / 4); |
2321 | 2270 | ||
2322 | if (adev->gfx.rlc.clear_state_obj == NULL) { | 2271 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, |
2323 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 2272 | AMDGPU_GEM_DOMAIN_VRAM, |
2324 | AMDGPU_GEM_DOMAIN_VRAM, | 2273 | &adev->gfx.rlc.clear_state_obj, |
2325 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, | 2274 | &adev->gfx.rlc.clear_state_gpu_addr, |
2326 | NULL, NULL, | 2275 | (void **)&adev->gfx.rlc.cs_ptr); |
2327 | &adev->gfx.rlc.clear_state_obj); | ||
2328 | |||
2329 | if (r) { | ||
2330 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); | ||
2331 | gfx_v6_0_rlc_fini(adev); | ||
2332 | return r; | ||
2333 | } | ||
2334 | } | ||
2335 | r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); | ||
2336 | if (unlikely(r != 0)) { | ||
2337 | gfx_v6_0_rlc_fini(adev); | ||
2338 | return r; | ||
2339 | } | ||
2340 | r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, | ||
2341 | &adev->gfx.rlc.clear_state_gpu_addr); | ||
2342 | if (r) { | 2276 | if (r) { |
2343 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | 2277 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); |
2344 | dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); | ||
2345 | gfx_v6_0_rlc_fini(adev); | 2278 | gfx_v6_0_rlc_fini(adev); |
2346 | return r; | 2279 | return r; |
2347 | } | 2280 | } |
2348 | 2281 | ||
2349 | r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); | ||
2350 | if (r) { | ||
2351 | dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); | ||
2352 | gfx_v6_0_rlc_fini(adev); | ||
2353 | return r; | ||
2354 | } | ||
2355 | /* set up the cs buffer */ | 2282 | /* set up the cs buffer */ |
2356 | dst_ptr = adev->gfx.rlc.cs_ptr; | 2283 | dst_ptr = adev->gfx.rlc.cs_ptr; |
2357 | reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256; | 2284 | reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256; |
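gfx_v6_0 (and gfx_v7_0 below) show the payoff of the new helpers: amdgpu_bo_create_reserved() folds the old create/reserve/pin/kmap ladder into one call, and amdgpu_bo_free_kernel() replaces the matching unpin/unref teardown. A minimal sketch under the new API; example_alloc, obj, gpu_addr and cpu_ptr are illustrative names:

static int example_alloc(struct amdgpu_device *adev, unsigned size)
{
	struct amdgpu_bo *obj;
	u64 gpu_addr;
	u32 *cpu_ptr;
	int r;

	r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM, &obj,
				      &gpu_addr, (void **)&cpu_ptr);
	if (r)
		return r;

	cpu_ptr[0] = 0;	/* ... fill the buffer ... */

	/* create_reserved returns the BO mapped and reserved */
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);

	/* teardown mirrors the converted *_fini() functions */
	amdgpu_bo_free_kernel(&obj, NULL, NULL);
	return 0;
}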
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 17b7c6934b0a..53a4af7596c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
@@ -1823,7 +1823,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) | |||
1823 | } | 1823 | } |
1824 | 1824 | ||
1825 | /** | 1825 | /** |
1826 | * gmc_v7_0_init_compute_vmid - gart enable | 1826 | * gfx_v7_0_init_compute_vmid - initialize the compute VMIDs
1827 | * | 1827 | * |
1828 | * @adev: amdgpu_device pointer | 1828 | * @adev: amdgpu_device pointer |
1829 | * | 1829 | * |
@@ -1833,7 +1833,7 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) | |||
1833 | #define DEFAULT_SH_MEM_BASES (0x6000) | 1833 | #define DEFAULT_SH_MEM_BASES (0x6000) |
1834 | #define FIRST_COMPUTE_VMID (8) | 1834 | #define FIRST_COMPUTE_VMID (8) |
1835 | #define LAST_COMPUTE_VMID (16) | 1835 | #define LAST_COMPUTE_VMID (16) |
1836 | static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev) | 1836 | static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev) |
1837 | { | 1837 | { |
1838 | int i; | 1838 | int i; |
1839 | uint32_t sh_mem_config; | 1839 | uint32_t sh_mem_config; |
@@ -1939,7 +1939,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) | |||
1939 | cik_srbm_select(adev, 0, 0, 0, 0); | 1939 | cik_srbm_select(adev, 0, 0, 0, 0); |
1940 | mutex_unlock(&adev->srbm_mutex); | 1940 | mutex_unlock(&adev->srbm_mutex); |
1941 | 1941 | ||
1942 | gmc_v7_0_init_compute_vmid(adev); | 1942 | gfx_v7_0_init_compute_vmid(adev); |
1943 | 1943 | ||
1944 | WREG32(mmSX_DEBUG_1, 0x20); | 1944 | WREG32(mmSX_DEBUG_1, 0x20); |
1945 | 1945 | ||
@@ -2774,39 +2774,18 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev) | |||
2774 | */ | 2774 | */ |
2775 | static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev) | 2775 | static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev) |
2776 | { | 2776 | { |
2777 | int i, r; | 2777 | int i; |
2778 | 2778 | ||
2779 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { | 2779 | for (i = 0; i < adev->gfx.num_compute_rings; i++) { |
2780 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; | 2780 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; |
2781 | 2781 | ||
2782 | if (ring->mqd_obj) { | 2782 | amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); |
2783 | r = amdgpu_bo_reserve(ring->mqd_obj, true); | ||
2784 | if (unlikely(r != 0)) | ||
2785 | dev_warn(adev->dev, "(%d) reserve MQD bo failed\n", r); | ||
2786 | |||
2787 | amdgpu_bo_unpin(ring->mqd_obj); | ||
2788 | amdgpu_bo_unreserve(ring->mqd_obj); | ||
2789 | |||
2790 | amdgpu_bo_unref(&ring->mqd_obj); | ||
2791 | ring->mqd_obj = NULL; | ||
2792 | } | ||
2793 | } | 2783 | } |
2794 | } | 2784 | } |
2795 | 2785 | ||
2796 | static void gfx_v7_0_mec_fini(struct amdgpu_device *adev) | 2786 | static void gfx_v7_0_mec_fini(struct amdgpu_device *adev) |
2797 | { | 2787 | { |
2798 | int r; | 2788 | amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); |
2799 | |||
2800 | if (adev->gfx.mec.hpd_eop_obj) { | ||
2801 | r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); | ||
2802 | if (unlikely(r != 0)) | ||
2803 | dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); | ||
2804 | amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); | ||
2805 | amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); | ||
2806 | |||
2807 | amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); | ||
2808 | adev->gfx.mec.hpd_eop_obj = NULL; | ||
2809 | } | ||
2810 | } | 2789 | } |
2811 | 2790 | ||
2812 | static int gfx_v7_0_mec_init(struct amdgpu_device *adev) | 2791 | static int gfx_v7_0_mec_init(struct amdgpu_device *adev) |
@@ -2823,33 +2802,14 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev) | |||
2823 | /* allocate space for ALL pipes (even the ones we don't own) */ | 2802 | /* allocate space for ALL pipes (even the ones we don't own) */ |
2824 | mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec | 2803 | mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec |
2825 | * GFX7_MEC_HPD_SIZE * 2; | 2804 | * GFX7_MEC_HPD_SIZE * 2; |
2826 | if (adev->gfx.mec.hpd_eop_obj == NULL) { | ||
2827 | r = amdgpu_bo_create(adev, | ||
2828 | mec_hpd_size, | ||
2829 | PAGE_SIZE, true, | ||
2830 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, | ||
2831 | &adev->gfx.mec.hpd_eop_obj); | ||
2832 | if (r) { | ||
2833 | dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); | ||
2834 | return r; | ||
2835 | } | ||
2836 | } | ||
2837 | 2805 | ||
2838 | r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); | 2806 | r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, |
2839 | if (unlikely(r != 0)) { | 2807 | AMDGPU_GEM_DOMAIN_GTT, |
2840 | gfx_v7_0_mec_fini(adev); | 2808 | &adev->gfx.mec.hpd_eop_obj, |
2841 | return r; | 2809 | &adev->gfx.mec.hpd_eop_gpu_addr, |
2842 | } | 2810 | (void **)&hpd); |
2843 | r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
2844 | &adev->gfx.mec.hpd_eop_gpu_addr); | ||
2845 | if (r) { | ||
2846 | dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); | ||
2847 | gfx_v7_0_mec_fini(adev); | ||
2848 | return r; | ||
2849 | } | ||
2850 | r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); | ||
2851 | if (r) { | 2811 | if (r) { |
2852 | dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); | 2812 | dev_warn(adev->dev, "(%d) create, pin or map of HPD EOP bo failed\n", r); |
2853 | gfx_v7_0_mec_fini(adev); | 2813 | gfx_v7_0_mec_fini(adev); |
2854 | return r; | 2814 | return r; |
2855 | } | 2815 | } |
@@ -3108,32 +3068,12 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id) | |||
3108 | struct cik_mqd *mqd; | 3068 | struct cik_mqd *mqd; |
3109 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; | 3069 | struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id]; |
3110 | 3070 | ||
3111 | if (ring->mqd_obj == NULL) { | 3071 | r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE, |
3112 | r = amdgpu_bo_create(adev, | 3072 | AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, |
3113 | sizeof(struct cik_mqd), | 3073 | &mqd_gpu_addr, (void **)&mqd); |
3114 | PAGE_SIZE, true, | ||
3115 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, | ||
3116 | &ring->mqd_obj); | ||
3117 | if (r) { | ||
3118 | dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); | ||
3119 | return r; | ||
3120 | } | ||
3121 | } | ||
3122 | |||
3123 | r = amdgpu_bo_reserve(ring->mqd_obj, false); | ||
3124 | if (unlikely(r != 0)) | ||
3125 | goto out; | ||
3126 | |||
3127 | r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
3128 | &mqd_gpu_addr); | ||
3129 | if (r) { | ||
3130 | dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r); | ||
3131 | goto out_unreserve; | ||
3132 | } | ||
3133 | r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd); | ||
3134 | if (r) { | 3074 | if (r) { |
3135 | dev_warn(adev->dev, "(%d) map MQD bo failed\n", r); | 3075 | dev_warn(adev->dev, "(%d) create MQD bo failed\n", r); |
3136 | goto out_unreserve; | 3076 | return r; |
3137 | } | 3077 | } |
3138 | 3078 | ||
3139 | mutex_lock(&adev->srbm_mutex); | 3079 | mutex_lock(&adev->srbm_mutex); |
@@ -3147,9 +3087,7 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id) | |||
3147 | mutex_unlock(&adev->srbm_mutex); | 3087 | mutex_unlock(&adev->srbm_mutex); |
3148 | 3088 | ||
3149 | amdgpu_bo_kunmap(ring->mqd_obj); | 3089 | amdgpu_bo_kunmap(ring->mqd_obj); |
3150 | out_unreserve: | ||
3151 | amdgpu_bo_unreserve(ring->mqd_obj); | 3090 | amdgpu_bo_unreserve(ring->mqd_obj); |
3152 | out: | ||
3153 | return 0; | 3091 | return 0; |
3154 | } | 3092 | } |
3155 | 3093 | ||
@@ -3361,43 +3299,9 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, | |||
3361 | */ | 3299 | */ |
3362 | static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) | 3300 | static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev) |
3363 | { | 3301 | { |
3364 | int r; | 3302 | amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL); |
3365 | 3303 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); | |
3366 | /* save restore block */ | 3304 | amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); |
3367 | if (adev->gfx.rlc.save_restore_obj) { | ||
3368 | r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, true); | ||
3369 | if (unlikely(r != 0)) | ||
3370 | dev_warn(adev->dev, "(%d) reserve RLC sr bo failed\n", r); | ||
3371 | amdgpu_bo_unpin(adev->gfx.rlc.save_restore_obj); | ||
3372 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | ||
3373 | |||
3374 | amdgpu_bo_unref(&adev->gfx.rlc.save_restore_obj); | ||
3375 | adev->gfx.rlc.save_restore_obj = NULL; | ||
3376 | } | ||
3377 | |||
3378 | /* clear state block */ | ||
3379 | if (adev->gfx.rlc.clear_state_obj) { | ||
3380 | r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); | ||
3381 | if (unlikely(r != 0)) | ||
3382 | dev_warn(adev->dev, "(%d) reserve RLC c bo failed\n", r); | ||
3383 | amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); | ||
3384 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
3385 | |||
3386 | amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); | ||
3387 | adev->gfx.rlc.clear_state_obj = NULL; | ||
3388 | } | ||
3389 | |||
3390 | /* clear state block */ | ||
3391 | if (adev->gfx.rlc.cp_table_obj) { | ||
3392 | r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); | ||
3393 | if (unlikely(r != 0)) | ||
3394 | dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); | ||
3395 | amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); | ||
3396 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
3397 | |||
3398 | amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); | ||
3399 | adev->gfx.rlc.cp_table_obj = NULL; | ||
3400 | } | ||
3401 | } | 3305 | } |
3402 | 3306 | ||
3403 | static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | 3307 | static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) |
@@ -3432,39 +3336,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3432 | 3336 | ||
3433 | if (src_ptr) { | 3337 | if (src_ptr) { |
3434 | /* save restore block */ | 3338 | /* save restore block */ |
3435 | if (adev->gfx.rlc.save_restore_obj == NULL) { | 3339 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, |
3436 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 3340 | AMDGPU_GEM_DOMAIN_VRAM, |
3437 | AMDGPU_GEM_DOMAIN_VRAM, | 3341 | &adev->gfx.rlc.save_restore_obj, |
3438 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 3342 | &adev->gfx.rlc.save_restore_gpu_addr, |
3439 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 3343 | (void **)&adev->gfx.rlc.sr_ptr); |
3440 | NULL, NULL, | ||
3441 | &adev->gfx.rlc.save_restore_obj); | ||
3442 | if (r) { | ||
3443 | dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r); | ||
3444 | return r; | ||
3445 | } | ||
3446 | } | ||
3447 | |||
3448 | r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false); | ||
3449 | if (unlikely(r != 0)) { | ||
3450 | gfx_v7_0_rlc_fini(adev); | ||
3451 | return r; | ||
3452 | } | ||
3453 | r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM, | ||
3454 | &adev->gfx.rlc.save_restore_gpu_addr); | ||
3455 | if (r) { | 3344 | if (r) { |
3456 | amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj); | 3345 | dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r); |
3457 | dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r); | ||
3458 | gfx_v7_0_rlc_fini(adev); | 3346 | gfx_v7_0_rlc_fini(adev); |
3459 | return r; | 3347 | return r; |
3460 | } | 3348 | } |
3461 | 3349 | ||
3462 | r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr); | ||
3463 | if (r) { | ||
3464 | dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r); | ||
3465 | gfx_v7_0_rlc_fini(adev); | ||
3466 | return r; | ||
3467 | } | ||
3468 | /* write the sr buffer */ | 3350 | /* write the sr buffer */ |
3469 | dst_ptr = adev->gfx.rlc.sr_ptr; | 3351 | dst_ptr = adev->gfx.rlc.sr_ptr; |
3470 | for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) | 3352 | for (i = 0; i < adev->gfx.rlc.reg_list_size; i++) |
@@ -3477,39 +3359,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3477 | /* clear state block */ | 3359 | /* clear state block */ |
3478 | adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev); | 3360 | adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev); |
3479 | 3361 | ||
3480 | if (adev->gfx.rlc.clear_state_obj == NULL) { | 3362 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, |
3481 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 3363 | AMDGPU_GEM_DOMAIN_VRAM, |
3482 | AMDGPU_GEM_DOMAIN_VRAM, | 3364 | &adev->gfx.rlc.clear_state_obj, |
3483 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 3365 | &adev->gfx.rlc.clear_state_gpu_addr, |
3484 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 3366 | (void **)&adev->gfx.rlc.cs_ptr); |
3485 | NULL, NULL, | ||
3486 | &adev->gfx.rlc.clear_state_obj); | ||
3487 | if (r) { | ||
3488 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); | ||
3489 | gfx_v7_0_rlc_fini(adev); | ||
3490 | return r; | ||
3491 | } | ||
3492 | } | ||
3493 | r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); | ||
3494 | if (unlikely(r != 0)) { | ||
3495 | gfx_v7_0_rlc_fini(adev); | ||
3496 | return r; | ||
3497 | } | ||
3498 | r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, | ||
3499 | &adev->gfx.rlc.clear_state_gpu_addr); | ||
3500 | if (r) { | 3367 | if (r) { |
3501 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | 3368 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); |
3502 | dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r); | ||
3503 | gfx_v7_0_rlc_fini(adev); | 3369 | gfx_v7_0_rlc_fini(adev); |
3504 | return r; | 3370 | return r; |
3505 | } | 3371 | } |
3506 | 3372 | ||
3507 | r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); | ||
3508 | if (r) { | ||
3509 | dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r); | ||
3510 | gfx_v7_0_rlc_fini(adev); | ||
3511 | return r; | ||
3512 | } | ||
3513 | /* set up the cs buffer */ | 3373 | /* set up the cs buffer */ |
3514 | dst_ptr = adev->gfx.rlc.cs_ptr; | 3374 | dst_ptr = adev->gfx.rlc.cs_ptr; |
3515 | gfx_v7_0_get_csb_buffer(adev, dst_ptr); | 3375 | gfx_v7_0_get_csb_buffer(adev, dst_ptr); |
@@ -3518,37 +3378,14 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) | |||
3518 | } | 3378 | } |
3519 | 3379 | ||
3520 | if (adev->gfx.rlc.cp_table_size) { | 3380 | if (adev->gfx.rlc.cp_table_size) { |
3521 | if (adev->gfx.rlc.cp_table_obj == NULL) { | ||
3522 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, | ||
3523 | AMDGPU_GEM_DOMAIN_VRAM, | ||
3524 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | ||
3525 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | ||
3526 | NULL, NULL, | ||
3527 | &adev->gfx.rlc.cp_table_obj); | ||
3528 | if (r) { | ||
3529 | dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); | ||
3530 | gfx_v7_0_rlc_fini(adev); | ||
3531 | return r; | ||
3532 | } | ||
3533 | } | ||
3534 | 3381 | ||
3535 | r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); | 3382 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, |
3536 | if (unlikely(r != 0)) { | 3383 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, |
3537 | dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); | 3384 | &adev->gfx.rlc.cp_table_obj, |
3538 | gfx_v7_0_rlc_fini(adev); | 3385 | &adev->gfx.rlc.cp_table_gpu_addr, |
3539 | return r; | 3386 | (void **)&adev->gfx.rlc.cp_table_ptr); |
3540 | } | ||
3541 | r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, | ||
3542 | &adev->gfx.rlc.cp_table_gpu_addr); | ||
3543 | if (r) { | ||
3544 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
3545 | dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); | ||
3546 | gfx_v7_0_rlc_fini(adev); | ||
3547 | return r; | ||
3548 | } | ||
3549 | r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); | ||
3550 | if (r) { | 3387 | if (r) { |
3551 | dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); | 3388 | dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); |
3552 | gfx_v7_0_rlc_fini(adev); | 3389 | gfx_v7_0_rlc_fini(adev); |
3553 | return r; | 3390 | return r; |
3554 | } | 3391 | } |
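
The gfx_v7_0.c hunks above all follow one pattern: the open-coded create/reserve/pin/kmap sequences (and their unpin/unreserve/unref mirrors) are collapsed into the amdgpu_bo_create_reserved() and amdgpu_bo_free_kernel() helpers. As a reading aid, here is a rough sketch of what the creation helper consolidates, reconstructed from the call sites visible in this diff; it is an approximation for illustration, not the kernel's exact implementation:

    /*
     * Approximate shape of amdgpu_bo_create_reserved(): create the BO
     * if needed, reserve it, pin it in the requested domain, and
     * optionally kmap it. The BO is returned still reserved, so the
     * caller can fill it and then kunmap/unreserve.
     */
    static int sketch_bo_create_reserved(struct amdgpu_device *adev,
    				     unsigned long size, int align,
    				     u32 domain, struct amdgpu_bo **bo_ptr,
    				     u64 *gpu_addr, void **cpu_addr)
    {
    	bool created = false;
    	int r;

    	if (!*bo_ptr) {
    		r = amdgpu_bo_create(adev, size, align, true, domain, 0,
    				     NULL, NULL, bo_ptr);
    		if (r)
    			return r;
    		created = true;
    	}
    	r = amdgpu_bo_reserve(*bo_ptr, false);
    	if (unlikely(r != 0))
    		goto error_free;
    	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
    	if (r)
    		goto error_unreserve;
    	if (cpu_addr) {
    		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
    		if (r)
    			goto error_unpin;
    	}
    	return 0;

    error_unpin:
    	amdgpu_bo_unpin(*bo_ptr);
    error_unreserve:
    	amdgpu_bo_unreserve(*bo_ptr);
    error_free:
    	if (created)
    		amdgpu_bo_unref(bo_ptr);
    	return r;
    }

Callers finish with amdgpu_bo_kunmap()/amdgpu_bo_unreserve() once the buffer contents are written, exactly as the updated gfx_v7_0_compute_queue_init() above still does.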
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 05436b8730b4..0710b0b2e4b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
@@ -1238,29 +1238,8 @@ static void cz_init_cp_jump_table(struct amdgpu_device *adev) | |||
1238 | 1238 | ||
1239 | static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) | 1239 | static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) |
1240 | { | 1240 | { |
1241 | int r; | 1241 | amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL); |
1242 | 1242 | amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL); | |
1243 | /* clear state block */ | ||
1244 | if (adev->gfx.rlc.clear_state_obj) { | ||
1245 | r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, true); | ||
1246 | if (unlikely(r != 0)) | ||
1247 | dev_warn(adev->dev, "(%d) reserve RLC cbs bo failed\n", r); | ||
1248 | amdgpu_bo_unpin(adev->gfx.rlc.clear_state_obj); | ||
1249 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | ||
1250 | amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); | ||
1251 | adev->gfx.rlc.clear_state_obj = NULL; | ||
1252 | } | ||
1253 | |||
1254 | /* jump table block */ | ||
1255 | if (adev->gfx.rlc.cp_table_obj) { | ||
1256 | r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, true); | ||
1257 | if (unlikely(r != 0)) | ||
1258 | dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); | ||
1259 | amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); | ||
1260 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | ||
1261 | amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); | ||
1262 | adev->gfx.rlc.cp_table_obj = NULL; | ||
1263 | } | ||
1264 | } | 1243 | } |
1265 | 1244 | ||
1266 | static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | 1245 | static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) |
@@ -1278,39 +1257,17 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | |||
1278 | /* clear state block */ | 1257 | /* clear state block */ |
1279 | adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev); | 1258 | adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev); |
1280 | 1259 | ||
1281 | if (adev->gfx.rlc.clear_state_obj == NULL) { | 1260 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, |
1282 | r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true, | 1261 | AMDGPU_GEM_DOMAIN_VRAM, |
1283 | AMDGPU_GEM_DOMAIN_VRAM, | 1262 | &adev->gfx.rlc.clear_state_obj, |
1284 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 1263 | &adev->gfx.rlc.clear_state_gpu_addr, |
1285 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 1264 | (void **)&adev->gfx.rlc.cs_ptr); |
1286 | NULL, NULL, | ||
1287 | &adev->gfx.rlc.clear_state_obj); | ||
1288 | if (r) { | ||
1289 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); | ||
1290 | gfx_v8_0_rlc_fini(adev); | ||
1291 | return r; | ||
1292 | } | ||
1293 | } | ||
1294 | r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); | ||
1295 | if (unlikely(r != 0)) { | ||
1296 | gfx_v8_0_rlc_fini(adev); | ||
1297 | return r; | ||
1298 | } | ||
1299 | r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM, | ||
1300 | &adev->gfx.rlc.clear_state_gpu_addr); | ||
1301 | if (r) { | 1265 | if (r) { |
1302 | amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); | 1266 | dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r); |
1303 | dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r); | ||
1304 | gfx_v8_0_rlc_fini(adev); | 1267 | gfx_v8_0_rlc_fini(adev); |
1305 | return r; | 1268 | return r; |
1306 | } | 1269 | } |
1307 | 1270 | ||
1308 | r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr); | ||
1309 | if (r) { | ||
1310 | dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r); | ||
1311 | gfx_v8_0_rlc_fini(adev); | ||
1312 | return r; | ||
1313 | } | ||
1314 | /* set up the cs buffer */ | 1271 | /* set up the cs buffer */ |
1315 | dst_ptr = adev->gfx.rlc.cs_ptr; | 1272 | dst_ptr = adev->gfx.rlc.cs_ptr; |
1316 | gfx_v8_0_get_csb_buffer(adev, dst_ptr); | 1273 | gfx_v8_0_get_csb_buffer(adev, dst_ptr); |
@@ -1321,34 +1278,13 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | |||
1321 | if ((adev->asic_type == CHIP_CARRIZO) || | 1278 | if ((adev->asic_type == CHIP_CARRIZO) || |
1322 | (adev->asic_type == CHIP_STONEY)) { | 1279 | (adev->asic_type == CHIP_STONEY)) { |
1323 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ | 1280 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ |
1324 | if (adev->gfx.rlc.cp_table_obj == NULL) { | 1281 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, |
1325 | r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, | 1282 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, |
1326 | AMDGPU_GEM_DOMAIN_VRAM, | 1283 | &adev->gfx.rlc.cp_table_obj, |
1327 | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | | 1284 | &adev->gfx.rlc.cp_table_gpu_addr, |
1328 | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, | 1285 | (void **)&adev->gfx.rlc.cp_table_ptr); |
1329 | NULL, NULL, | ||
1330 | &adev->gfx.rlc.cp_table_obj); | ||
1331 | if (r) { | ||
1332 | dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); | ||
1333 | return r; | ||
1334 | } | ||
1335 | } | ||
1336 | |||
1337 | r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); | ||
1338 | if (unlikely(r != 0)) { | ||
1339 | dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); | ||
1340 | return r; | ||
1341 | } | ||
1342 | r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, | ||
1343 | &adev->gfx.rlc.cp_table_gpu_addr); | ||
1344 | if (r) { | 1286 | if (r) { |
1345 | amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); | 1287 | dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); |
1346 | dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r); | ||
1347 | return r; | ||
1348 | } | ||
1349 | r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); | ||
1350 | if (r) { | ||
1351 | dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); | ||
1352 | return r; | 1288 | return r; |
1353 | } | 1289 | } |
1354 | 1290 | ||
@@ -1363,17 +1299,7 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) | |||
1363 | 1299 | ||
1364 | static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) | 1300 | static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) |
1365 | { | 1301 | { |
1366 | int r; | 1302 | amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); |
1367 | |||
1368 | if (adev->gfx.mec.hpd_eop_obj) { | ||
1369 | r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); | ||
1370 | if (unlikely(r != 0)) | ||
1371 | dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); | ||
1372 | amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); | ||
1373 | amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); | ||
1374 | amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); | ||
1375 | adev->gfx.mec.hpd_eop_obj = NULL; | ||
1376 | } | ||
1377 | } | 1303 | } |
1378 | 1304 | ||
1379 | static int gfx_v8_0_mec_init(struct amdgpu_device *adev) | 1305 | static int gfx_v8_0_mec_init(struct amdgpu_device *adev) |
@@ -1389,34 +1315,13 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) | |||
1389 | 1315 | ||
1390 | mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; | 1316 | mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE; |
1391 | 1317 | ||
1392 | if (adev->gfx.mec.hpd_eop_obj == NULL) { | 1318 | r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, |
1393 | r = amdgpu_bo_create(adev, | 1319 | AMDGPU_GEM_DOMAIN_GTT, |
1394 | mec_hpd_size, | 1320 | &adev->gfx.mec.hpd_eop_obj, |
1395 | PAGE_SIZE, true, | 1321 | &adev->gfx.mec.hpd_eop_gpu_addr, |
1396 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, | 1322 | (void **)&hpd); |
1397 | &adev->gfx.mec.hpd_eop_obj); | ||
1398 | if (r) { | ||
1399 | dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); | ||
1400 | return r; | ||
1401 | } | ||
1402 | } | ||
1403 | |||
1404 | r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); | ||
1405 | if (unlikely(r != 0)) { | ||
1406 | gfx_v8_0_mec_fini(adev); | ||
1407 | return r; | ||
1408 | } | ||
1409 | r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
1410 | &adev->gfx.mec.hpd_eop_gpu_addr); | ||
1411 | if (r) { | ||
1412 | dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); | ||
1413 | gfx_v8_0_mec_fini(adev); | ||
1414 | return r; | ||
1415 | } | ||
1416 | r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); | ||
1417 | if (r) { | 1323 | if (r) { |
1418 | dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); | 1324 | dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r); |
1419 | gfx_v8_0_mec_fini(adev); | ||
1420 | return r; | 1325 | return r; |
1421 | } | 1326 | } |
1422 | 1327 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 435db6f5efcf..b39f81dda847 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | |||
@@ -116,7 +116,9 @@ static const u32 golden_settings_gc_9_0[] = | |||
116 | SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080, | 116 | SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UTCL1_CNTL_2), 0x08000000, 0x08000080, |
117 | SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080, | 117 | SOC15_REG_OFFSET(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL), 0x08000000, 0x08000080, |
118 | SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080, | 118 | SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_UTCL1_CNTL), 0x08000000, 0x08000080, |
119 | SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), 0x00001000, 0x00001000, | ||
119 | SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107, | 120 | SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1), 0x0000000f, 0x01000107, |
121 | SOC15_REG_OFFSET(GC, 0, mmSQC_CONFIG), 0x03000000, 0x020a2000, | ||
120 | SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000, | 122 | SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX), 0xfffffeef, 0x010b0000, |
121 | SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68, | 123 | SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_HI), 0xffffffff, 0x4a2c0e68, |
122 | SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197, | 124 | SOC15_REG_OFFSET(GC, 0, mmTCP_CHAN_STEER_LO), 0xffffffff, 0xb5d3f197, |
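
The two registers added to golden_settings_gc_9_0 follow the array's (register, and_mask, or_mask) triple layout. These tables are typically applied with a masked read-modify-write per entry; a hedged sketch of that application step (the real driver iterates the whole array in a shared helper, which differs in detail):

    /* Apply one (reg, and_mask, or_mask) golden-settings triple.
     * A full and_mask conventionally means "overwrite the register";
     * otherwise clear the masked bits and OR in the new value.
     */
    static void sketch_apply_golden(struct amdgpu_device *adev,
    				u32 reg, u32 and_mask, u32 or_mask)
    {
    	u32 tmp;

    	if (and_mask == 0xffffffff) {
    		tmp = or_mask;
    	} else {
    		tmp = RREG32(reg);
    		tmp &= ~and_mask;
    		tmp |= or_mask;
    	}
    	WREG32(reg, tmp);
    }

So the new mmSQC_CONFIG entry, for example, clears the bits under 0x03000000 and then ORs in 0x020a2000.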
@@ -772,18 +774,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) | |||
772 | if (cs_data) { | 774 | if (cs_data) { |
773 | /* clear state block */ | 775 | /* clear state block */ |
774 | adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev); | 776 | adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev); |
775 | if (adev->gfx.rlc.clear_state_obj == NULL) { | 777 | r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE, |
776 | r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE, | 778 | AMDGPU_GEM_DOMAIN_VRAM, |
777 | AMDGPU_GEM_DOMAIN_VRAM, | 779 | &adev->gfx.rlc.clear_state_obj, |
778 | &adev->gfx.rlc.clear_state_obj, | 780 | &adev->gfx.rlc.clear_state_gpu_addr, |
779 | &adev->gfx.rlc.clear_state_gpu_addr, | 781 | (void **)&adev->gfx.rlc.cs_ptr); |
780 | (void **)&adev->gfx.rlc.cs_ptr); | 782 | if (r) { |
781 | if (r) { | 783 | dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", |
782 | dev_err(adev->dev, | 784 | r); |
783 | "(%d) failed to create rlc csb bo\n", r); | 785 | gfx_v9_0_rlc_fini(adev); |
784 | gfx_v9_0_rlc_fini(adev); | 786 | return r; |
785 | return r; | ||
786 | } | ||
787 | } | 787 | } |
788 | /* set up the cs buffer */ | 788 | /* set up the cs buffer */ |
789 | dst_ptr = adev->gfx.rlc.cs_ptr; | 789 | dst_ptr = adev->gfx.rlc.cs_ptr; |
@@ -795,18 +795,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) | |||
795 | if (adev->asic_type == CHIP_RAVEN) { | 795 | if (adev->asic_type == CHIP_RAVEN) { |
796 | /* TODO: double check the cp_table_size for RV */ | 796 | /* TODO: double check the cp_table_size for RV */ |
797 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ | 797 | adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ |
798 | if (adev->gfx.rlc.cp_table_obj == NULL) { | 798 | r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size, |
799 | r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size, | 799 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, |
800 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, | 800 | &adev->gfx.rlc.cp_table_obj, |
801 | &adev->gfx.rlc.cp_table_obj, | 801 | &adev->gfx.rlc.cp_table_gpu_addr, |
802 | &adev->gfx.rlc.cp_table_gpu_addr, | 802 | (void **)&adev->gfx.rlc.cp_table_ptr); |
803 | (void **)&adev->gfx.rlc.cp_table_ptr); | 803 | if (r) { |
804 | if (r) { | 804 | dev_err(adev->dev, |
805 | dev_err(adev->dev, | 805 | "(%d) failed to create cp table bo\n", r); |
806 | "(%d) failed to create cp table bo\n", r); | 806 | gfx_v9_0_rlc_fini(adev); |
807 | gfx_v9_0_rlc_fini(adev); | 807 | return r; |
808 | return r; | ||
809 | } | ||
810 | } | 808 | } |
811 | 809 | ||
812 | rv_init_cp_jump_table(adev); | 810 | rv_init_cp_jump_table(adev); |
@@ -821,28 +819,8 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) | |||
821 | 819 | ||
822 | static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) | 820 | static void gfx_v9_0_mec_fini(struct amdgpu_device *adev) |
823 | { | 821 | { |
824 | int r; | 822 | amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL); |
825 | 823 | amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL); | |
826 | if (adev->gfx.mec.hpd_eop_obj) { | ||
827 | r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, true); | ||
828 | if (unlikely(r != 0)) | ||
829 | dev_warn(adev->dev, "(%d) reserve HPD EOP bo failed\n", r); | ||
830 | amdgpu_bo_unpin(adev->gfx.mec.hpd_eop_obj); | ||
831 | amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); | ||
832 | |||
833 | amdgpu_bo_unref(&adev->gfx.mec.hpd_eop_obj); | ||
834 | adev->gfx.mec.hpd_eop_obj = NULL; | ||
835 | } | ||
836 | if (adev->gfx.mec.mec_fw_obj) { | ||
837 | r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, true); | ||
838 | if (unlikely(r != 0)) | ||
839 | dev_warn(adev->dev, "(%d) reserve mec firmware bo failed\n", r); | ||
840 | amdgpu_bo_unpin(adev->gfx.mec.mec_fw_obj); | ||
841 | amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); | ||
842 | |||
843 | amdgpu_bo_unref(&adev->gfx.mec.mec_fw_obj); | ||
844 | adev->gfx.mec.mec_fw_obj = NULL; | ||
845 | } | ||
846 | } | 824 | } |
847 | 825 | ||
848 | static int gfx_v9_0_mec_init(struct amdgpu_device *adev) | 826 | static int gfx_v9_0_mec_init(struct amdgpu_device *adev) |
@@ -862,33 +840,13 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) | |||
862 | amdgpu_gfx_compute_queue_acquire(adev); | 840 | amdgpu_gfx_compute_queue_acquire(adev); |
863 | mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; | 841 | mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE; |
864 | 842 | ||
865 | if (adev->gfx.mec.hpd_eop_obj == NULL) { | 843 | r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE, |
866 | r = amdgpu_bo_create(adev, | 844 | AMDGPU_GEM_DOMAIN_GTT, |
867 | mec_hpd_size, | 845 | &adev->gfx.mec.hpd_eop_obj, |
868 | PAGE_SIZE, true, | 846 | &adev->gfx.mec.hpd_eop_gpu_addr, |
869 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, | 847 | (void **)&hpd); |
870 | &adev->gfx.mec.hpd_eop_obj); | ||
871 | if (r) { | ||
872 | dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r); | ||
873 | return r; | ||
874 | } | ||
875 | } | ||
876 | |||
877 | r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false); | ||
878 | if (unlikely(r != 0)) { | ||
879 | gfx_v9_0_mec_fini(adev); | ||
880 | return r; | ||
881 | } | ||
882 | r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
883 | &adev->gfx.mec.hpd_eop_gpu_addr); | ||
884 | if (r) { | ||
885 | dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r); | ||
886 | gfx_v9_0_mec_fini(adev); | ||
887 | return r; | ||
888 | } | ||
889 | r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd); | ||
890 | if (r) { | 848 | if (r) { |
891 | dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r); | 849 | dev_warn(adev->dev, "(%d) create HPD EOP bo failed\n", r); |
892 | gfx_v9_0_mec_fini(adev); | 850 | gfx_v9_0_mec_fini(adev); |
893 | return r; | 851 | return r; |
894 | } | 852 | } |
@@ -905,42 +863,22 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) | |||
905 | le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); | 863 | le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); |
906 | fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; | 864 | fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; |
907 | 865 | ||
908 | if (adev->gfx.mec.mec_fw_obj == NULL) { | 866 | r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes, |
909 | r = amdgpu_bo_create(adev, | 867 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, |
910 | mec_hdr->header.ucode_size_bytes, | 868 | &adev->gfx.mec.mec_fw_obj, |
911 | PAGE_SIZE, true, | 869 | &adev->gfx.mec.mec_fw_gpu_addr, |
912 | AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, | 870 | (void **)&fw); |
913 | &adev->gfx.mec.mec_fw_obj); | ||
914 | if (r) { | ||
915 | dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); | ||
916 | return r; | ||
917 | } | ||
918 | } | ||
919 | |||
920 | r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false); | ||
921 | if (unlikely(r != 0)) { | ||
922 | gfx_v9_0_mec_fini(adev); | ||
923 | return r; | ||
924 | } | ||
925 | r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT, | ||
926 | &adev->gfx.mec.mec_fw_gpu_addr); | ||
927 | if (r) { | ||
928 | dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r); | ||
929 | gfx_v9_0_mec_fini(adev); | ||
930 | return r; | ||
931 | } | ||
932 | r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw); | ||
933 | if (r) { | 871 | if (r) { |
934 | dev_warn(adev->dev, "(%d) map firmware bo failed\n", r); | 872 | dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r); |
935 | gfx_v9_0_mec_fini(adev); | 873 | gfx_v9_0_mec_fini(adev); |
936 | return r; | 874 | return r; |
937 | } | 875 | } |
876 | |||
938 | memcpy(fw, fw_data, fw_size); | 877 | memcpy(fw, fw_data, fw_size); |
939 | 878 | ||
940 | amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); | 879 | amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj); |
941 | amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); | 880 | amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj); |
942 | 881 | ||
943 | |||
944 | return 0; | 882 | return 0; |
945 | } | 883 | } |
946 | 884 | ||
@@ -4158,7 +4096,7 @@ static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev, | |||
4158 | return 0; | 4096 | return 0; |
4159 | } | 4097 | } |
4160 | 4098 | ||
4161 | const struct amd_ip_funcs gfx_v9_0_ip_funcs = { | 4099 | static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { |
4162 | .name = "gfx_v9_0", | 4100 | .name = "gfx_v9_0", |
4163 | .early_init = gfx_v9_0_early_init, | 4101 | .early_init = gfx_v9_0_early_init, |
4164 | .late_init = gfx_v9_0_late_init, | 4102 | .late_init = gfx_v9_0_late_init, |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h index 56ef652a575d..fa5a3fbaf6ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h | |||
@@ -24,7 +24,6 @@ | |||
24 | #ifndef __GFX_V9_0_H__ | 24 | #ifndef __GFX_V9_0_H__ |
25 | #define __GFX_V9_0_H__ | 25 | #define __GFX_V9_0_H__ |
26 | 26 | ||
27 | extern const struct amd_ip_funcs gfx_v9_0_ip_funcs; | ||
28 | extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block; | 27 | extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block; |
29 | 28 | ||
30 | void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); | 29 | void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); |
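
Making gfx_v9_0_ip_funcs static works because nothing outside gfx_v9_0.c needs the funcs table directly: registration goes through the exported amdgpu_ip_block_version, which embeds a pointer to it. The shape of that descriptor, sketched from context (the field values below are illustrative assumptions, not quoted from the patch):

    /* The IP-block descriptor is the only symbol other files need;
     * it carries the (now static) funcs table by pointer.
     */
    const struct amdgpu_ip_block_version gfx_v9_0_ip_block = {
    	.type  = AMD_IP_BLOCK_TYPE_GFX,
    	.major = 9,
    	.minor = 0,
    	.rev   = 0,
    	.funcs = &gfx_v9_0_ip_funcs,
    };

The gfxhub_v1_0.h and mmhub_v1_0.h hunks below drop declarations on similar grounds: those symbols were either never defined or never referenced outside their own files.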
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h index d2dbb085f480..206e29cad753 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h | |||
@@ -30,7 +30,5 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, | |||
30 | bool value); | 30 | bool value); |
31 | void gfxhub_v1_0_init(struct amdgpu_device *adev); | 31 | void gfxhub_v1_0_init(struct amdgpu_device *adev); |
32 | u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev); | 32 | u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev); |
33 | extern const struct amd_ip_funcs gfxhub_v1_0_ip_funcs; | ||
34 | extern const struct amdgpu_ip_block_version gfxhub_v1_0_ip_block; | ||
35 | 33 | ||
36 | #endif | 34 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index 57bb940c0ecd..5d38229baf69 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | |||
@@ -36,7 +36,4 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev); | |||
36 | void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, | 36 | void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev, |
37 | bool enable); | 37 | bool enable); |
38 | 38 | ||
39 | extern const struct amd_ip_funcs mmhub_v1_0_ip_funcs; | ||
40 | extern const struct amdgpu_ip_block_version mmhub_v1_0_ip_block; | ||
41 | |||
42 | #endif | 39 | #endif |
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 591f3e7fb508..fd7c72aaafa6 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | |||
@@ -291,6 +291,8 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) | |||
291 | 291 | ||
292 | DRM_DEBUG("Setting write pointer\n"); | 292 | DRM_DEBUG("Setting write pointer\n"); |
293 | if (ring->use_doorbell) { | 293 | if (ring->use_doorbell) { |
294 | u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs]; | ||
295 | |||
294 | DRM_DEBUG("Using doorbell -- " | 296 | DRM_DEBUG("Using doorbell -- " |
295 | "wptr_offs == 0x%08x " | 297 | "wptr_offs == 0x%08x " |
296 | "lower_32_bits(ring->wptr) << 2 == 0x%08x " | 298 | "lower_32_bits(ring->wptr) << 2 == 0x%08x " |
@@ -299,8 +301,7 @@ static void sdma_v4_0_ring_set_wptr(struct amdgpu_ring *ring) | |||
299 | lower_32_bits(ring->wptr << 2), | 301 | lower_32_bits(ring->wptr << 2), |
300 | upper_32_bits(ring->wptr << 2)); | 302 | upper_32_bits(ring->wptr << 2)); |
301 | /* XXX check if swapping is necessary on BE */ | 303 | /* XXX check if swapping is necessary on BE */ |
302 | adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr << 2); | 304 | WRITE_ONCE(*wb, (ring->wptr << 2)); |
303 | adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2); | ||
304 | DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", | 305 | DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n", |
305 | ring->doorbell_index, ring->wptr << 2); | 306 | ring->doorbell_index, ring->wptr << 2); |
306 | WDOORBELL64(ring->doorbell_index, ring->wptr << 2); | 307 | WDOORBELL64(ring->doorbell_index, ring->wptr << 2); |
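
The wptr write-back fix above replaces two 32-bit stores with a single 64-bit WRITE_ONCE. The shadow slot is polled by the SDMA engine, so a reader could previously observe the window between the two halves; WRITE_ONCE also forbids the compiler from tearing, reordering or eliding the store. Side by side (sketch, assuming adev->wb.wb is the u32-indexed array the hunk shows):

    /* old: two independent stores -- a poller can see lower/upper
     * halves taken from different wptr values */
    adev->wb.wb[ring->wptr_offs]     = lower_32_bits(ring->wptr << 2);
    adev->wb.wb[ring->wptr_offs + 1] = upper_32_bits(ring->wptr << 2);

    /* new: one full-width store, with compiler tearing forbidden */
    u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
    WRITE_ONCE(*wb, ring->wptr << 2);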
@@ -573,12 +574,13 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable) | |||
573 | static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) | 574 | static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) |
574 | { | 575 | { |
575 | struct amdgpu_ring *ring; | 576 | struct amdgpu_ring *ring; |
576 | u32 rb_cntl, ib_cntl; | 577 | u32 rb_cntl, ib_cntl, wptr_poll_cntl; |
577 | u32 rb_bufsz; | 578 | u32 rb_bufsz; |
578 | u32 wb_offset; | 579 | u32 wb_offset; |
579 | u32 doorbell; | 580 | u32 doorbell; |
580 | u32 doorbell_offset; | 581 | u32 doorbell_offset; |
581 | u32 temp; | 582 | u32 temp; |
583 | u64 wptr_gpu_addr; | ||
582 | int i, r; | 584 | int i, r; |
583 | 585 | ||
584 | for (i = 0; i < adev->sdma.num_instances; i++) { | 586 | for (i = 0; i < adev->sdma.num_instances; i++) { |
@@ -660,6 +662,19 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) | |||
660 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp); | 662 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_F32_CNTL), temp); |
661 | } | 663 | } |
662 | 664 | ||
665 | /* set up the wptr shadow polling */ | ||
666 | wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); | ||
667 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO), | ||
668 | lower_32_bits(wptr_gpu_addr)); | ||
669 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI), | ||
670 | upper_32_bits(wptr_gpu_addr)); | ||
671 | wptr_poll_cntl = RREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL)); | ||
672 | if (amdgpu_sriov_vf(adev)) | ||
673 | wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1); | ||
674 | else | ||
675 | wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0); | ||
676 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), wptr_poll_cntl); | ||
677 | |||
663 | /* enable DMA RB */ | 678 | /* enable DMA RB */ |
664 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); | 679 | rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1); |
665 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl); | 680 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_GFX_RB_CNTL), rb_cntl); |
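
The new wptr-shadow polling block uses the REG_SET_FIELD() idiom to flip a single field. Roughly, the macro expands to a clear-then-set on the field's mask and shift from the generated register headers (an approximation, not the exact in-tree definition). Enabling F32_POLL_ENABLE only for VFs presumably lets the engine fall back to polling the memory shadow when doorbell writes can be lost across SR-IOV world switches:

    /* Approximate expansion of
     *   new = REG_SET_FIELD(old, SDMA0_GFX_RB_WPTR_POLL_CNTL,
     *                       F32_POLL_ENABLE, x);
     * using the <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT headers:
     */
    #define SKETCH_SET_FIELD(old, mask, shift, val) \
    	(((old) & ~(mask)) | (((val) << (shift)) & (mask)))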
@@ -687,6 +702,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev) | |||
687 | 702 | ||
688 | if (adev->mman.buffer_funcs_ring == ring) | 703 | if (adev->mman.buffer_funcs_ring == ring) |
689 | amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); | 704 | amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size); |
705 | |||
690 | } | 706 | } |
691 | 707 | ||
692 | return 0; | 708 | return 0; |
@@ -783,15 +799,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) | |||
783 | const struct sdma_firmware_header_v1_0 *hdr; | 799 | const struct sdma_firmware_header_v1_0 *hdr; |
784 | const __le32 *fw_data; | 800 | const __le32 *fw_data; |
785 | u32 fw_size; | 801 | u32 fw_size; |
786 | u32 digest_size = 0; | ||
787 | int i, j; | 802 | int i, j; |
788 | 803 | ||
789 | /* halt the MEs */ | 804 | /* halt the MEs */ |
790 | sdma_v4_0_enable(adev, false); | 805 | sdma_v4_0_enable(adev, false); |
791 | 806 | ||
792 | for (i = 0; i < adev->sdma.num_instances; i++) { | 807 | for (i = 0; i < adev->sdma.num_instances; i++) { |
793 | uint16_t version_major; | ||
794 | uint16_t version_minor; | ||
795 | if (!adev->sdma.instance[i].fw) | 808 | if (!adev->sdma.instance[i].fw) |
796 | return -EINVAL; | 809 | return -EINVAL; |
797 | 810 | ||
@@ -799,23 +812,12 @@ static int sdma_v4_0_load_microcode(struct amdgpu_device *adev) | |||
799 | amdgpu_ucode_print_sdma_hdr(&hdr->header); | 812 | amdgpu_ucode_print_sdma_hdr(&hdr->header); |
800 | fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; | 813 | fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; |
801 | 814 | ||
802 | version_major = le16_to_cpu(hdr->header.header_version_major); | ||
803 | version_minor = le16_to_cpu(hdr->header.header_version_minor); | ||
804 | |||
805 | if (version_major == 1 && version_minor >= 1) { | ||
806 | const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr = (const struct sdma_firmware_header_v1_1 *) hdr; | ||
807 | digest_size = le32_to_cpu(sdma_v1_1_hdr->digest_size); | ||
808 | } | ||
809 | |||
810 | fw_size -= digest_size; | ||
811 | |||
812 | fw_data = (const __le32 *) | 815 | fw_data = (const __le32 *) |
813 | (adev->sdma.instance[i].fw->data + | 816 | (adev->sdma.instance[i].fw->data + |
814 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); | 817 | le32_to_cpu(hdr->header.ucode_array_offset_bytes)); |
815 | 818 | ||
816 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0); | 819 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_ADDR), 0); |
817 | 820 | ||
818 | |||
819 | for (j = 0; j < fw_size; j++) | 821 | for (j = 0; j < fw_size; j++) |
820 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); | 822 | WREG32(sdma_v4_0_get_reg_offset(i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++)); |
821 | 823 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 987b958368ac..23a85750edd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | |||
@@ -165,6 +165,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) | |||
165 | unsigned i; | 165 | unsigned i; |
166 | int r; | 166 | int r; |
167 | 167 | ||
168 | if (amdgpu_sriov_vf(adev)) | ||
169 | return 0; | ||
170 | |||
168 | r = amdgpu_ring_alloc(ring, 16); | 171 | r = amdgpu_ring_alloc(ring, 16); |
169 | if (r) { | 172 | if (r) { |
170 | DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n", | 173 | DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n", |
@@ -432,13 +435,19 @@ static int uvd_v7_0_sw_init(void *handle) | |||
432 | return r; | 435 | return r; |
433 | } | 436 | } |
434 | 437 | ||
435 | |||
436 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { | 438 | for (i = 0; i < adev->uvd.num_enc_rings; ++i) { |
437 | ring = &adev->uvd.ring_enc[i]; | 439 | ring = &adev->uvd.ring_enc[i]; |
438 | sprintf(ring->name, "uvd_enc%d", i); | 440 | sprintf(ring->name, "uvd_enc%d", i); |
439 | if (amdgpu_sriov_vf(adev)) { | 441 | if (amdgpu_sriov_vf(adev)) { |
440 | ring->use_doorbell = true; | 442 | ring->use_doorbell = true; |
441 | ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; | 443 | |
444 | /* currently only the first encoding ring is used under | ||
445 | * SR-IOV, so set an unused doorbell location for the other rings. | ||
446 | */ | ||
447 | if (i == 0) | ||
448 | ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; | ||
449 | else | ||
450 | ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; | ||
442 | } | 451 | } |
443 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); | 452 | r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); |
444 | if (r) | 453 | if (r) |
@@ -685,6 +694,11 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, | |||
685 | /* 4, set resp to zero */ | 694 | /* 4, set resp to zero */ |
686 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); | 695 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); |
687 | 696 | ||
697 | WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0); | ||
698 | adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0; | ||
699 | adev->uvd.ring_enc[0].wptr = 0; | ||
700 | adev->uvd.ring_enc[0].wptr_old = 0; | ||
701 | |||
688 | /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ | 702 | /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ |
689 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); | 703 | WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); |
690 | 704 | ||
@@ -702,7 +716,6 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, | |||
702 | dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data); | 716 | dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data); |
703 | return -EBUSY; | 717 | return -EBUSY; |
704 | } | 718 | } |
705 | WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0); | ||
706 | 719 | ||
707 | return 0; | 720 | return 0; |
708 | } | 721 | } |
@@ -736,11 +749,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) | |||
736 | init_table += header->uvd_table_offset; | 749 | init_table += header->uvd_table_offset; |
737 | 750 | ||
738 | ring = &adev->uvd.ring; | 751 | ring = &adev->uvd.ring; |
752 | ring->wptr = 0; | ||
739 | size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); | 753 | size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); |
740 | 754 | ||
741 | /* disable clock gating */ | ||
742 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), | ||
743 | ~UVD_POWER_STATUS__UVD_PG_MODE_MASK, 0); | ||
744 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), | 755 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), |
745 | 0xFFFFFFFF, 0x00000004); | 756 | 0xFFFFFFFF, 0x00000004); |
746 | /* mc resume*/ | 757 | /* mc resume*/ |
@@ -777,12 +788,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) | |||
777 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), | 788 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), |
778 | AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); | 789 | AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); |
779 | 790 | ||
780 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_ADDR_CONFIG), | ||
781 | adev->gfx.config.gb_addr_config); | ||
782 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG), | ||
783 | adev->gfx.config.gb_addr_config); | ||
784 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG), | ||
785 | adev->gfx.config.gb_addr_config); | ||
786 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); | 791 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); |
787 | /* mc resume end*/ | 792 | /* mc resume end*/ |
788 | 793 | ||
@@ -819,17 +824,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) | |||
819 | UVD_LMI_CTRL__REQ_MODE_MASK | | 824 | UVD_LMI_CTRL__REQ_MODE_MASK | |
820 | 0x00100000L)); | 825 | 0x00100000L)); |
821 | 826 | ||
822 | /* disable byte swapping */ | ||
823 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_SWAP_CNTL), 0); | ||
824 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MP_SWAP_CNTL), 0); | ||
825 | |||
826 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA0), 0x40c2040); | ||
827 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXA1), 0x0); | ||
828 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB0), 0x40c2040); | ||
829 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUXB1), 0x0); | ||
830 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_ALU), 0); | ||
831 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MPC_SET_MUX), 0x88); | ||
832 | |||
833 | /* take all subblocks out of reset, except VCPU */ | 827 | /* take all subblocks out of reset, except VCPU */ |
834 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), | 828 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), |
835 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); | 829 | UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); |
@@ -838,15 +832,6 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) | |||
838 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), | 832 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), |
839 | UVD_VCPU_CNTL__CLK_EN_MASK); | 833 | UVD_VCPU_CNTL__CLK_EN_MASK); |
840 | 834 | ||
841 | /* enable UMC */ | ||
842 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), | ||
843 | ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); | ||
844 | |||
845 | /* boot up the VCPU */ | ||
846 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0); | ||
847 | |||
848 | MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02); | ||
849 | |||
850 | /* enable master interrupt */ | 835 | /* enable master interrupt */ |
851 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), | 836 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), |
852 | ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), | 837 | ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), |
@@ -859,40 +844,31 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) | |||
859 | /* force RBC into idle state */ | 844 | /* force RBC into idle state */ |
860 | size = order_base_2(ring->ring_size); | 845 | size = order_base_2(ring->ring_size); |
861 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); | 846 | tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); |
862 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); | ||
863 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); | 847 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); |
864 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); | ||
865 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); | ||
866 | tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); | ||
867 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); | 848 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); |
868 | 849 | ||
869 | /* set the write pointer delay */ | ||
870 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL), 0); | ||
871 | |||
872 | /* set the wb address */ | ||
873 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR), | ||
874 | (upper_32_bits(ring->gpu_addr) >> 2)); | ||
875 | |||
877 | /* program the RB_BASE for the ring buffer */ | ||
877 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), | ||
878 | lower_32_bits(ring->gpu_addr)); | ||
879 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), | ||
880 | upper_32_bits(ring->gpu_addr)); | ||
881 | |||
882 | ring->wptr = 0; | ||
883 | ring = &adev->uvd.ring_enc[0]; | 850 | ring = &adev->uvd.ring_enc[0]; |
851 | ring->wptr = 0; | ||
884 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); | 852 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); |
885 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); | 853 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); |
886 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4); | 854 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4); |
887 | 855 | ||
856 | /* boot up the VCPU */ | ||
857 | MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0); | ||
858 | |||
859 | /* enable UMC */ | ||
860 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), | ||
861 | ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); | ||
862 | |||
863 | MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02); | ||
864 | |||
888 | /* add end packet */ | 865 | /* add end packet */ |
889 | memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); | 866 | memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); |
890 | table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; | 867 | table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; |
891 | header->uvd_table_size = table_size; | 868 | header->uvd_table_size = table_size; |
892 | 869 | ||
893 | return uvd_v7_0_mmsch_start(adev, &adev->virt.mm_table); | ||
894 | } | 870 | } |
895 | return -EINVAL; /* already initialized? */ | ||
896 | } | 872 | } |
897 | 873 | ||
898 | /** | 874 | /** |
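
Taken together, the uvd_v7_0.c hunks tighten the SR-IOV bring-up ordering: the doorbell and the driver's wptr bookkeeping are zeroed before the MMSCH is kicked rather than after it reports back, register writes the host already owns (clock gating, byte swapping, MPC muxes) are dropped from the init table, and VCPU boot/UMC enable now come after the rings are described. Condensed, the start flow reads (sketch assembled from the hunks above):

    /* 1. hand the init table to the MMSCH and clear the response reg
     *    (done by the surrounding uvd_v7_0_mmsch_start() code);
     * 2. reset the doorbell and the wptr shadow so no stale write
     *    pointer survives a previous world switch: */
    WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0);
    adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0;
    adev->uvd.ring_enc[0].wptr = 0;
    adev->uvd.ring_enc[0].wptr_old = 0;

    /* 3. kick the MMSCH and poll the mailbox until it answers */
    WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001);
    /* ... wait for mmVCE_MMSCH_VF_MAILBOX_RESP to go non-zero ... */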
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 1ecd6bb90c1f..11134d5f7443 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | |||
@@ -173,6 +173,11 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev, | |||
173 | /* 4, set resp to zero */ | 173 | /* 4, set resp to zero */ |
174 | WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0); | 174 | WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP), 0); |
175 | 175 | ||
176 | WDOORBELL32(adev->vce.ring[0].doorbell_index, 0); | ||
177 | adev->wb.wb[adev->vce.ring[0].wptr_offs] = 0; | ||
178 | adev->vce.ring[0].wptr = 0; | ||
179 | adev->vce.ring[0].wptr_old = 0; | ||
180 | |||
176 | /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ | 181 | /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ |
177 | WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001); | 182 | WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST), 0x10000001); |
178 | 183 | ||
@@ -190,7 +195,6 @@ static int vce_v4_0_mmsch_start(struct amdgpu_device *adev, | |||
190 | dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data); | 195 | dev_err(adev->dev, "failed to init MMSCH, mmVCE_MMSCH_VF_MAILBOX_RESP = %x\n", data); |
191 | return -EBUSY; | 196 | return -EBUSY; |
192 | } | 197 | } |
193 | WDOORBELL32(adev->vce.ring[0].doorbell_index, 0); | ||
194 | 198 | ||
195 | return 0; | 199 | return 0; |
196 | } | 200 | } |
@@ -274,7 +278,8 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev) | |||
274 | 278 | ||
275 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0); | 279 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_LMI_CTRL2), ~0x100, 0); |
276 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), | 280 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), |
277 | 0xffffffff, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); | 281 | VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK, |
282 | VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); | ||
278 | 283 | ||
279 | /* end of MC_RESUME */ | 284 | /* end of MC_RESUME */ |
280 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), | 285 | MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), |
@@ -296,11 +301,9 @@ static int vce_v4_0_sriov_start(struct amdgpu_device *adev) | |||
296 | memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); | 301 | memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); |
297 | table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; | 302 | table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; |
298 | header->vce_table_size = table_size; | 303 | header->vce_table_size = table_size; |
299 | |||
300 | return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table); | ||
301 | } | 304 | } |
302 | 305 | ||
303 | return -EINVAL; /* already initialized? */ | 306 | return vce_v4_0_mmsch_start(adev, &adev->virt.mm_table); |
304 | } | 307 | } |
305 | 308 | ||
306 | /** | 309 | /** |
@@ -443,12 +446,14 @@ static int vce_v4_0_sw_init(void *handle) | |||
443 | if (amdgpu_sriov_vf(adev)) { | 446 | if (amdgpu_sriov_vf(adev)) { |
444 | /* DOORBELL only works under SRIOV */ | 447 | /* DOORBELL only works under SRIOV */ |
445 | ring->use_doorbell = true; | 448 | ring->use_doorbell = true; |
449 | |||
450 | /* currently only use the first encoding ring for sriov, | ||
451 | * so set unused location for other unused rings. | ||
452 | */ | ||
446 | if (i == 0) | 453 | if (i == 0) |
447 | ring->doorbell_index = AMDGPU_DOORBELL64_RING0_1 * 2; | 454 | ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING0_1 * 2; |
448 | else if (i == 1) | ||
449 | ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2; | ||
450 | else | 455 | else |
451 | ring->doorbell_index = AMDGPU_DOORBELL64_RING2_3 * 2 + 1; | 456 | ring->doorbell_index = AMDGPU_DOORBELL64_VCE_RING2_3 * 2 + 1; |
452 | } | 457 | } |
453 | r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); | 458 | r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0); |
454 | if (r) | 459 | if (r) |
@@ -990,11 +995,13 @@ static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev, | |||
990 | { | 995 | { |
991 | uint32_t val = 0; | 996 | uint32_t val = 0; |
992 | 997 | ||
993 | if (state == AMDGPU_IRQ_STATE_ENABLE) | 998 | if (!amdgpu_sriov_vf(adev)) { |
994 | val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; | 999 | if (state == AMDGPU_IRQ_STATE_ENABLE) |
1000 | val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK; | ||
995 | 1001 | ||
996 | WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val, | 1002 | WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_EN), val, |
997 | ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); | 1003 | ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK); |
1004 | } | ||
998 | return 0; | 1005 | return 0; |
999 | } | 1006 | } |
1000 | 1007 | ||
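
vce_v4_0_set_interrupt_state() is now a no-op on VFs, presumably because the host owns interrupt routing under SR-IOV (the SYS_INT_EN write in the init table above was narrowed for the same reason). For reference, WREG32_P() is the driver's masked read-modify-write; approximately (a sketch, not the exact macro):

    /* WREG32_P(reg, val, mask): keep the register bits selected by
     * mask, take the remaining bits from val. With
     * mask = ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
     * only the trap-interrupt enable bit is touched.
     */
    u32 tmp = RREG32(reg);
    tmp &= mask;		/* preserved bits        */
    tmp |= (val & ~mask);	/* updated bits from val */
    WREG32(reg, tmp);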
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 0b74da3dca8b..bc839ff0bdd0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | |||
@@ -1240,13 +1240,18 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr) | |||
1240 | { | 1240 | { |
1241 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 1241 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
1242 | 1242 | ||
1243 | if (cz_hwmgr->sclk_dpm.soft_min_clk != | 1243 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, |
1244 | cz_hwmgr->sclk_dpm.soft_max_clk) | 1244 | PPSMC_MSG_SetSclkSoftMin, |
1245 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | 1245 | cz_get_sclk_level(hwmgr, |
1246 | PPSMC_MSG_SetSclkSoftMin, | 1246 | cz_hwmgr->sclk_dpm.soft_max_clk, |
1247 | cz_get_sclk_level(hwmgr, | 1247 | PPSMC_MSG_SetSclkSoftMin)); |
1248 | cz_hwmgr->sclk_dpm.soft_max_clk, | 1248 | |
1249 | PPSMC_MSG_SetSclkSoftMin)); | 1249 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, |
1250 | PPSMC_MSG_SetSclkSoftMax, | ||
1251 | cz_get_sclk_level(hwmgr, | ||
1252 | cz_hwmgr->sclk_dpm.soft_max_clk, | ||
1253 | PPSMC_MSG_SetSclkSoftMax)); | ||
1254 | |||
1250 | return 0; | 1255 | return 0; |
1251 | } | 1256 | } |
1252 | 1257 | ||
@@ -1292,17 +1297,55 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) | |||
1292 | { | 1297 | { |
1293 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); | 1298 | struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); |
1294 | 1299 | ||
1295 | if (cz_hwmgr->sclk_dpm.soft_min_clk != | 1300 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, |
1296 | cz_hwmgr->sclk_dpm.soft_max_clk) { | 1301 | PPSMC_MSG_SetSclkSoftMax, |
1297 | cz_hwmgr->sclk_dpm.soft_max_clk = | 1302 | cz_get_sclk_level(hwmgr, |
1298 | cz_hwmgr->sclk_dpm.soft_min_clk; | 1303 | cz_hwmgr->sclk_dpm.soft_min_clk, |
1304 | PPSMC_MSG_SetSclkSoftMax)); | ||
1299 | 1305 | ||
1300 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | 1306 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, |
1307 | PPSMC_MSG_SetSclkSoftMin, | ||
1308 | cz_get_sclk_level(hwmgr, | ||
1309 | cz_hwmgr->sclk_dpm.soft_min_clk, | ||
1310 | PPSMC_MSG_SetSclkSoftMin)); | ||
1311 | |||
1312 | return 0; | ||
1313 | } | ||
1314 | |||
1315 | static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk) | ||
1316 | { | ||
1317 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
1318 | PPSMC_MSG_SetSclkSoftMin, | ||
1319 | cz_get_sclk_level(hwmgr, | ||
1320 | sclk, | ||
1321 | PPSMC_MSG_SetSclkSoftMin)); | ||
1322 | |||
1323 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
1301 | PPSMC_MSG_SetSclkSoftMax, | 1324 | PPSMC_MSG_SetSclkSoftMax, |
1302 | cz_get_sclk_level(hwmgr, | 1325 | cz_get_sclk_level(hwmgr, |
1303 | cz_hwmgr->sclk_dpm.soft_max_clk, | 1326 | sclk, |
1304 | PPSMC_MSG_SetSclkSoftMax)); | 1327 | PPSMC_MSG_SetSclkSoftMax)); |
1328 | return 0; | ||
1329 | } | ||
1330 | |||
1331 | static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk) | ||
1332 | { | ||
1333 | struct phm_clock_voltage_dependency_table *table = | ||
1334 | hwmgr->dyn_state.vddc_dependency_on_sclk; | ||
1335 | int32_t tmp_sclk; | ||
1336 | int32_t count; | ||
1337 | |||
1338 | tmp_sclk = table->entries[table->count-1].clk * 70 / 100; | ||
1339 | |||
1340 | for (count = table->count-1; count >= 0; count--) { | ||
1341 | if (tmp_sclk >= table->entries[count].clk) { | ||
1342 | tmp_sclk = table->entries[count].clk; | ||
1343 | *sclk = tmp_sclk; | ||
1344 | break; | ||
1345 | } | ||
1305 | } | 1346 | } |
1347 | if (count < 0) | ||
1348 | *sclk = table->entries[0].clk; | ||
1306 | 1349 | ||
1307 | return 0; | 1350 | return 0; |
1308 | } | 1351 | } |
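cz_get_profiling_clk() targets 70% of the highest sclk in the vddc dependency table, then walks the table downward to the closest entry at or below that target, falling back to the lowest entry when nothing qualifies. The selection, as a standalone sketch over an ascending array of clocks:

    #include <stdint.h>

    /* highest entry not exceeding 70% of the table maximum, else entry 0 */
    static uint32_t profiling_clk(const uint32_t *clk, int count)
    {
            int32_t target = clk[count - 1] * 70 / 100;
            int i;

            for (i = count - 1; i >= 0; i--)
                    if (target >= (int32_t)clk[i])
                            return clk[i];

            return clk[0];  /* 70% of max sits below every entry */
    }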
@@ -1310,30 +1353,70 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr) | |||
1310 | static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, | 1353 | static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, |
1311 | enum amd_dpm_forced_level level) | 1354 | enum amd_dpm_forced_level level) |
1312 | { | 1355 | { |
1356 | uint32_t sclk = 0; | ||
1313 | int ret = 0; | 1357 | int ret = 0; |
1358 | uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | | ||
1359 | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | | ||
1360 | AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; | ||
1361 | |||
1362 | if (level == hwmgr->dpm_level) | ||
1363 | return ret; | ||
1364 | |||
1365 | if (!(hwmgr->dpm_level & profile_mode_mask)) { | ||
1366 | /* enter profile mode, save current level, disable gfx cg */ ||
1367 | if (level & profile_mode_mask) { | ||
1368 | hwmgr->saved_dpm_level = hwmgr->dpm_level; | ||
1369 | cgs_set_clockgating_state(hwmgr->device, | ||
1370 | AMD_IP_BLOCK_TYPE_GFX, | ||
1371 | AMD_CG_STATE_UNGATE); | ||
1372 | } | ||
1373 | } else { | ||
1374 | /* exit profile mode, restore level, enable gfx cg */ ||
1375 | if (!(level & profile_mode_mask)) { | ||
1376 | if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) | ||
1377 | level = hwmgr->saved_dpm_level; | ||
1378 | cgs_set_clockgating_state(hwmgr->device, | ||
1379 | AMD_IP_BLOCK_TYPE_GFX, | ||
1380 | AMD_CG_STATE_GATE); | ||
1381 | } | ||
1382 | } | ||
1314 | 1383 | ||
1315 | switch (level) { | 1384 | switch (level) { |
1316 | case AMD_DPM_FORCED_LEVEL_HIGH: | 1385 | case AMD_DPM_FORCED_LEVEL_HIGH: |
1386 | case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: | ||
1317 | ret = cz_phm_force_dpm_highest(hwmgr); | 1387 | ret = cz_phm_force_dpm_highest(hwmgr); |
1318 | if (ret) | 1388 | if (ret) |
1319 | return ret; | 1389 | return ret; |
1390 | hwmgr->dpm_level = level; | ||
1320 | break; | 1391 | break; |
1321 | case AMD_DPM_FORCED_LEVEL_LOW: | 1392 | case AMD_DPM_FORCED_LEVEL_LOW: |
1393 | case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: | ||
1322 | ret = cz_phm_force_dpm_lowest(hwmgr); | 1394 | ret = cz_phm_force_dpm_lowest(hwmgr); |
1323 | if (ret) | 1395 | if (ret) |
1324 | return ret; | 1396 | return ret; |
1397 | hwmgr->dpm_level = level; | ||
1325 | break; | 1398 | break; |
1326 | case AMD_DPM_FORCED_LEVEL_AUTO: | 1399 | case AMD_DPM_FORCED_LEVEL_AUTO: |
1327 | ret = cz_phm_unforce_dpm_levels(hwmgr); | 1400 | ret = cz_phm_unforce_dpm_levels(hwmgr); |
1328 | if (ret) | 1401 | if (ret) |
1329 | return ret; | 1402 | return ret; |
1403 | hwmgr->dpm_level = level; | ||
1404 | break; | ||
1405 | case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: | ||
1406 | ret = cz_get_profiling_clk(hwmgr, &sclk); | ||
1407 | if (ret) | ||
1408 | return ret; | ||
1409 | hwmgr->dpm_level = level; | ||
1410 | cz_phm_force_dpm_sclk(hwmgr, sclk); | ||
1411 | break; | ||
1412 | case AMD_DPM_FORCED_LEVEL_MANUAL: | ||
1413 | hwmgr->dpm_level = level; | ||
1330 | break; | 1414 | break; |
1415 | case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: | ||
1331 | default: | 1416 | default: |
1332 | break; | 1417 | break; |
1333 | } | 1418 | } |
1334 | 1419 | ||
1335 | hwmgr->dpm_level = level; | ||
1336 | |||
1337 | return ret; | 1420 | return ret; |
1338 | } | 1421 | } |
1339 | 1422 | ||
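cz_dpm_force_dpm_level() now short-circuits when the level is unchanged, tracks profile-mode transitions (entering a PROFILE_* level saves the current level and ungates GFX clockgating; leaving, including PROFILE_EXIT which restores the saved level, re-gates it), and records the new level per-case instead of unconditionally at the end. The transition bookkeeping, sketched with clockgating reduced to a flag:

    #include <stdbool.h>

    enum level { AUTO, LOW, HIGH, MANUAL,
                 PROF_STD, PROF_MIN_SCLK, PROF_PEAK, PROF_EXIT };

    #define IS_PROFILE(l) ((l) == PROF_STD || (l) == PROF_MIN_SCLK || (l) == PROF_PEAK)

    struct dpm { enum level cur, saved; bool gfx_cg; };

    /* returns the level the switch statement should actually apply */
    static enum level profile_transition(struct dpm *d, enum level next)
    {
            if (!IS_PROFILE(d->cur) && IS_PROFILE(next)) {
                    d->saved = d->cur;  /* remember where to return to */
                    d->gfx_cg = false;  /* ungate GFX while profiling */
            } else if (IS_PROFILE(d->cur) && !IS_PROFILE(next)) {
                    if (next == PROF_EXIT)
                            next = d->saved;
                    d->gfx_cg = true;   /* restore clockgating */
            }
            return next;
    }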
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index d025653c7823..9547f265a8bb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
@@ -557,9 +557,8 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u | |||
557 | return vddci_table->entries[i].value; | 557 | return vddci_table->entries[i].value; |
558 | } | 558 | } |
559 | 559 | ||
560 | PP_ASSERT_WITH_CODE(false, | 560 | pr_debug("vddci is larger than max value in vddci_table\n"); |
561 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", | 561 | return vddci_table->entries[i-1].value; |
562 | return vddci_table->entries[i-1].value); | ||
563 | } | 562 | } |
564 | 563 | ||
565 | int phm_find_boot_level(void *table, | 564 | int phm_find_boot_level(void *table, |
@@ -583,26 +582,26 @@ int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, | |||
583 | phm_ppt_v1_voltage_lookup_table *lookup_table, | 582 | phm_ppt_v1_voltage_lookup_table *lookup_table, |
584 | uint16_t virtual_voltage_id, int32_t *sclk) | 583 | uint16_t virtual_voltage_id, int32_t *sclk) |
585 | { | 584 | { |
586 | uint8_t entryId; | 585 | uint8_t entry_id; |
587 | uint8_t voltageId; | 586 | uint8_t voltage_id; |
588 | struct phm_ppt_v1_information *table_info = | 587 | struct phm_ppt_v1_information *table_info = |
589 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 588 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
590 | 589 | ||
591 | PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL); | 590 | PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL); |
592 | 591 | ||
593 | /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */ | 592 | /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */ |
594 | for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) { | 593 | for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) { |
595 | voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd; | 594 | voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd; |
596 | if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id) | 595 | if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id) |
597 | break; | 596 | break; |
598 | } | 597 | } |
599 | 598 | ||
600 | PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count, | 599 | if (entry_id >= table_info->vdd_dep_on_sclk->count) { |
601 | "Can't find requested voltage id in vdd_dep_on_sclk table!", | 600 | pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n"); |
602 | return -EINVAL; | 601 | return -EINVAL; |
603 | ); | 602 | } |
604 | 603 | ||
605 | *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk; | 604 | *sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk; |
606 | 605 | ||
607 | return 0; | 606 | return 0; |
608 | } | 607 | } |
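phm_get_sclk_for_voltage_evv() scans the sclk dependency table for the entry whose voltage index resolves to the requested leakage id and returns that entry's clock; the hunk trades the PP_ASSERT_WITH_CODE macro for an explicit bounds check plus pr_debug. The lookup itself, sketched with the two tables flattened into parallel arrays:

    #include <stdint.h>

    /* 0 on success, -1 when no dependency entry matches the leakage id */
    static int sclk_for_voltage(const uint8_t *vdd_ind, const uint32_t *clk,
                                int count, const uint16_t *lookup_vdd,
                                uint16_t virtual_voltage_id, int32_t *sclk)
    {
            int i;

            for (i = 0; i < count; i++)
                    if (lookup_vdd[vdd_ind[i]] == virtual_voltage_id)
                            break;

            if (i >= count)
                    return -1;  /* id not present in vdd_dep_on_sclk */

            *sclk = clk[i];
            return 0;
    }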
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index cd33eb179db2..c062844b15f3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | |||
@@ -142,7 +142,7 @@ int pp_atomfwctrl_get_voltage_table_v4(struct pp_hwmgr *hwmgr, | |||
142 | } | 142 | } |
143 | } else if (voltage_mode == VOLTAGE_OBJ_SVID2) { | 143 | } else if (voltage_mode == VOLTAGE_OBJ_SVID2) { |
144 | voltage_table->psi1_enable = | 144 | voltage_table->psi1_enable = |
145 | voltage_object->svid2_voltage_obj.loadline_psi1 & 0x1; | 145 | (voltage_object->svid2_voltage_obj.loadline_psi1 & 0x20) >> 5; |
146 | voltage_table->psi0_enable = | 146 | voltage_table->psi0_enable = |
147 | voltage_object->svid2_voltage_obj.psi0_enable & 0x1; | 147 | voltage_object->svid2_voltage_obj.psi0_enable & 0x1; |
148 | voltage_table->max_vid_step = | 148 | voltage_table->max_vid_step = |
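The ppatomfwctrl fix reads the PSI1 enable from bit 5 of the SVID2 loadline byte instead of bit 0. The mask-and-shift idiom for extracting a single packed flag, as a sketch:

    #include <stdint.h>

    #define LOADLINE_PSI1_EN_MASK  0x20u  /* bit 5 of the loadline byte */
    #define LOADLINE_PSI1_EN_SHIFT 5

    static uint8_t psi1_enable(uint8_t loadline_psi1)
    {
            /* isolate bit 5, then shift it down to yield 0 or 1 */
            return (loadline_psi1 & LOADLINE_PSI1_EN_MASK) >> LOADLINE_PSI1_EN_SHIFT;
    }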
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c index 4c7f430b36eb..edc5fb6412d9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | |||
@@ -265,6 +265,15 @@ static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input, | |||
265 | } | 265 | } |
266 | } */ | 266 | } */ |
267 | 267 | ||
268 | if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) || | ||
269 | ((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) { | ||
270 | rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100; | ||
271 | rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100; | ||
272 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | ||
273 | PPSMC_MSG_SetSoftMinVcn, | ||
274 | (rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min); | ||
275 | } | ||
276 | |||
268 | if((hwmgr->gfx_arbiter.sclk_hard_min != 0) && | 277 | if((hwmgr->gfx_arbiter.sclk_hard_min != 0) && |
269 | ((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) { | 278 | ((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) { |
270 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, | 279 | smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, |
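The rv_hwmgr hunk caches the last soft-min vclk/dclk (scaled down by 100 for the SMU) and messages the firmware only when either value changes, packing the parameter into two 16-bit halves. A sketch of the send-only-on-change pattern, with the SMU mailbox reduced to a stub; note the hunk as committed packs vclk_soft_min into both halves:

    #include <stdint.h>

    struct vcn_cache { uint32_t vclk_soft_min, dclk_soft_min; };

    #define MSG_SET_SOFT_MIN_VCN 0x28

    static void smu_send(uint32_t msg, uint32_t param)
    {
            (void)msg; (void)param;  /* mailbox write elided */
    }

    static void update_vcn_soft_min(struct vcn_cache *c,
                                    uint32_t vclk_req, uint32_t dclk_req)
    {
            uint32_t vclk = vclk_req / 100;  /* arbiter units -> SMU units */
            uint32_t dclk = dclk_req / 100;

            if (vclk == c->vclk_soft_min && dclk == c->dclk_soft_min)
                    return;  /* unchanged, skip the SMU round trip */

            c->vclk_soft_min = vclk;
            c->dclk_soft_min = dclk;
            smu_send(MSG_SET_SOFT_MIN_VCN, (vclk << 16) | vclk);
    }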
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h index afb852295a15..2472b50e54cf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h | |||
@@ -280,6 +280,8 @@ struct rv_hwmgr { | |||
280 | 280 | ||
281 | uint32_t f_actual_hard_min_freq; | 281 | uint32_t f_actual_hard_min_freq; |
282 | uint32_t fabric_actual_soft_min_freq; | 282 | uint32_t fabric_actual_soft_min_freq; |
283 | uint32_t vclk_soft_min; | ||
284 | uint32_t dclk_soft_min; | ||
283 | uint32_t gfx_actual_soft_min_freq; | 285 | uint32_t gfx_actual_soft_min_freq; |
284 | 286 | ||
285 | bool vcn_power_gated; | 287 | bool vcn_power_gated; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index f01cda93f178..c2743233ba10 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -1962,9 +1962,6 @@ static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr) | |||
1962 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); | 1962 | temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1); |
1963 | break; | 1963 | break; |
1964 | default: | 1964 | default: |
1965 | PP_ASSERT_WITH_CODE(0, | ||
1966 | "Failed to setup PCC HW register! Wrong GPIO assigned for VDDC_PCC_GPIO_PINID!", | ||
1967 | ); | ||
1968 | break; | 1965 | break; |
1969 | } | 1966 | } |
1970 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); | 1967 | cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg); |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 01ff5054041b..9d71a259d97d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | |||
@@ -2313,7 +2313,7 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr) | |||
2313 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg); | 2313 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg); |
2314 | 2314 | ||
2315 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc); | 2315 | smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc); |
2316 | vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);; | 2316 | vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response); |
2317 | 2317 | ||
2318 | if (1 == agc_btc_response) { | 2318 | if (1 == agc_btc_response) { |
2319 | if (1 == data->acg_loop_state) | 2319 | if (1 == data->acg_loop_state) |
@@ -2522,6 +2522,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) | |||
2522 | pp_table->DisplayDpmVoltageMode = | 2522 | pp_table->DisplayDpmVoltageMode = |
2523 | (uint8_t)(table_info->uc_dcef_dpm_voltage_mode); | 2523 | (uint8_t)(table_info->uc_dcef_dpm_voltage_mode); |
2524 | 2524 | ||
2525 | data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable; | ||
2526 | data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable; | ||
2527 | |||
2525 | if (data->registry_data.ulv_support && | 2528 | if (data->registry_data.ulv_support && |
2526 | table_info->us_ulv_voltage_offset) { | 2529 | table_info->us_ulv_voltage_offset) { |
2527 | result = vega10_populate_ulv_state(hwmgr); | 2530 | result = vega10_populate_ulv_state(hwmgr); |
@@ -3701,10 +3704,22 @@ static void vega10_apply_dal_minimum_voltage_request( | |||
3701 | return; | 3704 | return; |
3702 | } | 3705 | } |
3703 | 3706 | ||
3707 | static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr) | ||
3708 | { | ||
3709 | struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk; | ||
3710 | struct phm_ppt_v2_information *table_info = | ||
3711 | (struct phm_ppt_v2_information *)(hwmgr->pptable); | ||
3712 | |||
3713 | vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk; | ||
3714 | |||
3715 | return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1; | ||
3716 | } | ||
3717 | |||
3704 | static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) | 3718 | static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) |
3705 | { | 3719 | { |
3706 | struct vega10_hwmgr *data = | 3720 | struct vega10_hwmgr *data = |
3707 | (struct vega10_hwmgr *)(hwmgr->backend); | 3721 | (struct vega10_hwmgr *)(hwmgr->backend); |
3722 | uint32_t socclk_idx; | ||
3708 | 3723 | ||
3709 | vega10_apply_dal_minimum_voltage_request(hwmgr); | 3724 | vega10_apply_dal_minimum_voltage_request(hwmgr); |
3710 | 3725 | ||
@@ -3725,13 +3740,22 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) | |||
3725 | if (!data->registry_data.mclk_dpm_key_disabled) { | 3740 | if (!data->registry_data.mclk_dpm_key_disabled) { |
3726 | if (data->smc_state_table.mem_boot_level != | 3741 | if (data->smc_state_table.mem_boot_level != |
3727 | data->dpm_table.mem_table.dpm_state.soft_min_level) { | 3742 | data->dpm_table.mem_table.dpm_state.soft_min_level) { |
3743 | if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) { | ||
3744 | socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr); | ||
3728 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( | 3745 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( |
3729 | hwmgr->smumgr, | 3746 | hwmgr->smumgr, |
3730 | PPSMC_MSG_SetSoftMinUclkByIndex, | 3747 | PPSMC_MSG_SetSoftMinSocclkByIndex, |
3731 | data->smc_state_table.mem_boot_level), | 3748 | socclk_idx), |
3732 | "Failed to set soft min mclk index!", | 3749 | "Failed to set soft min uclk index!", |
3733 | return -EINVAL); | 3750 | return -EINVAL); |
3734 | 3751 | } else { | |
3752 | PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( | ||
3753 | hwmgr->smumgr, | ||
3754 | PPSMC_MSG_SetSoftMinUclkByIndex, | ||
3755 | data->smc_state_table.mem_boot_level), | ||
3756 | "Failed to set soft min uclk index!", | ||
3757 | return -EINVAL); | ||
3758 | } | ||
3735 | data->dpm_table.mem_table.dpm_state.soft_min_level = | 3759 | data->dpm_table.mem_table.dpm_state.soft_min_level = |
3736 | data->smc_state_table.mem_boot_level; | 3760 | data->smc_state_table.mem_boot_level; |
3737 | } | 3761 | } |
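When mem_boot_level is the top uclk DPM state, vega10 now derives a soclk floor from the mclk voltage-dependency table (the last entry's voltage index plus one) and sends PPSMC_MSG_SetSoftMinSocclkByIndex instead of the plain uclk message, tying the SOC floor to the voltage the top memory state needs. The index derivation, sketched over a flattened table:

    #include <stdint.h>

    #define NUM_UCLK_DPM_LEVELS 4  /* vega10's uclk DPM depth (assumption) */

    /* soc index guaranteeing enough SOC voltage for the top uclk state */
    static uint32_t soc_index_for_max_uclk(const uint8_t *mclk_vdd_ind)
    {
            return mclk_vdd_ind[NUM_UCLK_DPM_LEVELS - 1] + 1;
    }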
@@ -4138,7 +4162,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( | |||
4138 | pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); | 4162 | pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); |
4139 | } | 4163 | } |
4140 | } else { | 4164 | } else { |
4141 | pr_info("Cannot find requested DCEFCLK!"); | 4165 | pr_debug("Cannot find requested DCEFCLK!"); |
4142 | } | 4166 | } |
4143 | 4167 | ||
4144 | if (min_clocks.memoryClock != 0) { | 4168 | if (min_clocks.memoryClock != 0) { |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index fbafc849ea71..e7fa67063cdc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | |||
@@ -543,7 +543,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] = | |||
543 | * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 543 | * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
544 | */ | 544 | */ |
545 | /* SQ */ | 545 | /* SQ */ |
546 | { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, | 546 | { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_EN_MASK, DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT, 0x0000 }, |
547 | { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, | 547 | { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK, DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, |
548 | { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, | 548 | { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_SQ_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, |
549 | { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 }, | 549 | { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_SQ_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 }, |
@@ -556,7 +556,7 @@ static const struct vega10_didt_config_reg SEEDCCtrlForceStallConfig_Vega10[] = | |||
556 | { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, | 556 | { ixDIDT_SQ_EDC_CTRL, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN_MASK, DIDT_SQ_EDC_CTRL__SE_EDC_LEVEL_COMB_EN__SHIFT, 0x0001 }, |
557 | 557 | ||
558 | /* TD */ | 558 | /* TD */ |
559 | { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0001 }, | 559 | { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_EN_MASK, DIDT_TD_EDC_CTRL__EDC_EN__SHIFT, 0x0000 }, |
560 | { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, | 560 | { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK, DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT, 0x0000 }, |
561 | { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, | 561 | { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE_MASK, DIDT_TD_EDC_CTRL__EDC_CLK_EN_OVERRIDE__SHIFT, 0x0000 }, |
562 | { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 }, | 562 | { ixDIDT_TD_EDC_CTRL, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL_MASK, DIDT_TD_EDC_CTRL__EDC_FORCE_STALL__SHIFT, 0x0001 }, |
@@ -1208,7 +1208,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) | |||
1208 | if (0 != result) | 1208 | if (0 != result) |
1209 | return result; | 1209 | return result; |
1210 | 1210 | ||
1211 | vega10_didt_set_mask(hwmgr, true); | 1211 | vega10_didt_set_mask(hwmgr, false); |
1212 | 1212 | ||
1213 | cgs_enter_safe_mode(hwmgr->device, false); | 1213 | cgs_enter_safe_mode(hwmgr->device, false); |
1214 | 1214 | ||
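The powertune rows flip EDC_EN to 0 in the SQ and TD force-stall tables and pass false to vega10_didt_set_mask() afterwards, so the force-stall configuration is programmed without enabling EDC itself. Each table row is a (register, mask, shift, value) tuple applied as a read-modify-write; applying one row, sketched over a toy register file:

    #include <stdint.h>

    struct didt_config_reg { uint32_t offset, mask, shift, value; };

    static uint32_t regs[4096];  /* toy indirect-register file */

    /* clear the field named by mask, then install the shifted value */
    static void apply_didt_row(const struct didt_config_reg *row)
    {
            uint32_t data = regs[row->offset];

            data &= ~row->mask;
            data |= (row->value << row->shift) & row->mask;
            regs[row->offset] = data;
    }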
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index e7ab8eb8a0cf..d44243441d28 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | |||
@@ -321,10 +321,7 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) | |||
321 | 321 | ||
322 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 322 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
323 | PHM_PlatformCaps_MicrocodeFanControl)) { | 323 | PHM_PlatformCaps_MicrocodeFanControl)) { |
324 | result = vega10_fan_ctrl_set_static_mode(hwmgr, | 324 | result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); |
325 | FDO_PWM_MODE_STATIC); | ||
326 | if (!result) | ||
327 | result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); | ||
328 | } else | 325 | } else |
329 | result = vega10_fan_ctrl_set_default_mode(hwmgr); | 326 | result = vega10_fan_ctrl_set_default_mode(hwmgr); |
330 | 327 | ||
@@ -633,7 +630,6 @@ int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, | |||
633 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 630 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
634 | PHM_PlatformCaps_MicrocodeFanControl)) { | 631 | PHM_PlatformCaps_MicrocodeFanControl)) { |
635 | vega10_fan_ctrl_start_smc_fan_control(hwmgr); | 632 | vega10_fan_ctrl_start_smc_fan_control(hwmgr); |
636 | vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); | ||
637 | } | 633 | } |
638 | 634 | ||
639 | return 0; | 635 | return 0; |
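Both thermal hunks drop the explicit FDO_PWM_MODE_STATIC programming around vega10_fan_ctrl_start_smc_fan_control(): once the SMC fan controller is started it owns the fan mode, so the extra static-mode write becomes redundant. The resulting flow, with the hardware calls stubbed:

    #include <stdbool.h>

    static int start_smc_fan_control(void) { return 0; }  /* stub */
    static int set_default_fan_mode(void)  { return 0; }  /* stub */

    /* reset-to-default now hands control straight to the SMC when the
     * MicrocodeFanControl capability is present */
    static int reset_fan_to_default(bool microcode_fan_control)
    {
            if (microcode_fan_control)
                    return start_smc_fan_control();
            return set_default_fan_mode();
    }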
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 47e57bd2c36f..91b0105e8240 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | |||
@@ -128,6 +128,8 @@ struct phm_uvd_arbiter { | |||
128 | uint32_t dclk; | 128 | uint32_t dclk; |
129 | uint32_t vclk_ceiling; | 129 | uint32_t vclk_ceiling; |
130 | uint32_t dclk_ceiling; | 130 | uint32_t dclk_ceiling; |
131 | uint32_t vclk_soft_min; | ||
132 | uint32_t dclk_soft_min; | ||
131 | }; | 133 | }; |
132 | 134 | ||
133 | struct phm_vce_arbiter { | 135 | struct phm_vce_arbiter { |
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h index e0e106f1b23a..901c960cfe21 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h | |||
@@ -66,7 +66,12 @@ | |||
66 | #define PPSMC_MSG_SetMinVddcrSocVoltage 0x22 | 66 | #define PPSMC_MSG_SetMinVddcrSocVoltage 0x22 |
67 | #define PPSMC_MSG_SetMinVideoFclkFreq 0x23 | 67 | #define PPSMC_MSG_SetMinVideoFclkFreq 0x23 |
68 | #define PPSMC_MSG_SetMinDeepSleepDcefclk 0x24 | 68 | #define PPSMC_MSG_SetMinDeepSleepDcefclk 0x24 |
69 | #define PPSMC_Message_Count 0x25 | 69 | #define PPSMC_MSG_ForcePowerDownGfx 0x25 |
70 | #define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26 | ||
71 | #define PPSMC_MSG_SetDppclkVoltageByFreq 0x27 | ||
72 | #define PPSMC_MSG_SetSoftMinVcn 0x28 | ||
73 | #define PPSMC_Message_Count 0x29 | ||
74 | |||
70 | 75 | ||
71 | typedef uint16_t PPSMC_Result; | 76 | typedef uint16_t PPSMC_Result; |
72 | typedef int PPSMC_Msg; | 77 | typedef int PPSMC_Msg; |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5008f3d4cccc..ec63bc5e9de7 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -464,7 +464,7 @@ struct radeon_bo_list { | |||
464 | struct radeon_bo *robj; | 464 | struct radeon_bo *robj; |
465 | struct ttm_validate_buffer tv; | 465 | struct ttm_validate_buffer tv; |
466 | uint64_t gpu_offset; | 466 | uint64_t gpu_offset; |
467 | unsigned prefered_domains; | 467 | unsigned preferred_domains; |
468 | unsigned allowed_domains; | 468 | unsigned allowed_domains; |
469 | uint32_t tiling_flags; | 469 | uint32_t tiling_flags; |
470 | }; | 470 | }; |
@@ -2327,7 +2327,7 @@ struct radeon_device { | |||
2327 | uint8_t *bios; | 2327 | uint8_t *bios; |
2328 | bool is_atom_bios; | 2328 | bool is_atom_bios; |
2329 | uint16_t bios_header_start; | 2329 | uint16_t bios_header_start; |
2330 | struct radeon_bo *stollen_vga_memory; | 2330 | struct radeon_bo *stolen_vga_memory; |
2331 | /* Register mmio */ | 2331 | /* Register mmio */ |
2332 | resource_size_t rmmio_base; | 2332 | resource_size_t rmmio_base; |
2333 | resource_size_t rmmio_size; | 2333 | resource_size_t rmmio_size; |
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 6efbd65c929e..8d3251a10cd4 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c | |||
@@ -351,7 +351,7 @@ out: | |||
351 | * handles it. | 351 | * handles it. |
352 | * Returns NOTIFY code | 352 | * Returns NOTIFY code |
353 | */ | 353 | */ |
354 | int radeon_atif_handler(struct radeon_device *rdev, | 354 | static int radeon_atif_handler(struct radeon_device *rdev, |
355 | struct acpi_bus_event *event) | 355 | struct acpi_bus_event *event) |
356 | { | 356 | { |
357 | struct radeon_atif *atif = &rdev->atif; | 357 | struct radeon_atif *atif = &rdev->atif; |
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.h b/drivers/gpu/drm/radeon/radeon_acpi.h index 7af1977c2c68..35202a453e66 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.h +++ b/drivers/gpu/drm/radeon/radeon_acpi.h | |||
@@ -27,9 +27,6 @@ | |||
27 | struct radeon_device; | 27 | struct radeon_device; |
28 | struct acpi_bus_event; | 28 | struct acpi_bus_event; |
29 | 29 | ||
30 | int radeon_atif_handler(struct radeon_device *rdev, | ||
31 | struct acpi_bus_event *event); | ||
32 | |||
33 | /* AMD hw uses four ACPI control methods: | 30 | /* AMD hw uses four ACPI control methods: |
34 | * 1. ATIF | 31 | * 1. ATIF |
35 | * ARG0: (ACPI_INTEGER) function code | 32 | * ARG0: (ACPI_INTEGER) function code |
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 00b22af70f5c..1ae31dbc61c6 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c | |||
@@ -130,7 +130,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
130 | p->rdev->family == CHIP_RS880)) { | 130 | p->rdev->family == CHIP_RS880)) { |
131 | 131 | ||
132 | /* TODO: is this still needed for NI+ ? */ | 132 | /* TODO: is this still needed for NI+ ? */ |
133 | p->relocs[i].prefered_domains = | 133 | p->relocs[i].preferred_domains = |
134 | RADEON_GEM_DOMAIN_VRAM; | 134 | RADEON_GEM_DOMAIN_VRAM; |
135 | 135 | ||
136 | p->relocs[i].allowed_domains = | 136 | p->relocs[i].allowed_domains = |
@@ -148,14 +148,14 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
148 | return -EINVAL; | 148 | return -EINVAL; |
149 | } | 149 | } |
150 | 150 | ||
151 | p->relocs[i].prefered_domains = domain; | 151 | p->relocs[i].preferred_domains = domain; |
152 | if (domain == RADEON_GEM_DOMAIN_VRAM) | 152 | if (domain == RADEON_GEM_DOMAIN_VRAM) |
153 | domain |= RADEON_GEM_DOMAIN_GTT; | 153 | domain |= RADEON_GEM_DOMAIN_GTT; |
154 | p->relocs[i].allowed_domains = domain; | 154 | p->relocs[i].allowed_domains = domain; |
155 | } | 155 | } |
156 | 156 | ||
157 | if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { | 157 | if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) { |
158 | uint32_t domain = p->relocs[i].prefered_domains; | 158 | uint32_t domain = p->relocs[i].preferred_domains; |
159 | if (!(domain & RADEON_GEM_DOMAIN_GTT)) { | 159 | if (!(domain & RADEON_GEM_DOMAIN_GTT)) { |
160 | DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is " | 160 | DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is " |
161 | "allowed for userptr BOs\n"); | 161 | "allowed for userptr BOs\n"); |
@@ -163,7 +163,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) | |||
163 | } | 163 | } |
164 | need_mmap_lock = true; | 164 | need_mmap_lock = true; |
165 | domain = RADEON_GEM_DOMAIN_GTT; | 165 | domain = RADEON_GEM_DOMAIN_GTT; |
166 | p->relocs[i].prefered_domains = domain; | 166 | p->relocs[i].preferred_domains = domain; |
167 | p->relocs[i].allowed_domains = domain; | 167 | p->relocs[i].allowed_domains = domain; |
168 | } | 168 | } |
169 | 169 | ||
@@ -437,7 +437,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo | |||
437 | if (bo == NULL) | 437 | if (bo == NULL) |
438 | continue; | 438 | continue; |
439 | 439 | ||
440 | drm_gem_object_unreference_unlocked(&bo->gem_base); | 440 | drm_gem_object_put_unlocked(&bo->gem_base); |
441 | } | 441 | } |
442 | } | 442 | } |
443 | kfree(parser->track); | 443 | kfree(parser->track); |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 4a4f9533c53b..91952277557e 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -307,7 +307,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, | |||
307 | robj = gem_to_radeon_bo(obj); | 307 | robj = gem_to_radeon_bo(obj); |
308 | ret = radeon_bo_reserve(robj, false); | 308 | ret = radeon_bo_reserve(robj, false); |
309 | if (ret != 0) { | 309 | if (ret != 0) { |
310 | drm_gem_object_unreference_unlocked(obj); | 310 | drm_gem_object_put_unlocked(obj); |
311 | return ret; | 311 | return ret; |
312 | } | 312 | } |
313 | /* Only 27 bit offset for legacy cursor */ | 313 | /* Only 27 bit offset for legacy cursor */ |
@@ -317,7 +317,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc, | |||
317 | radeon_bo_unreserve(robj); | 317 | radeon_bo_unreserve(robj); |
318 | if (ret) { | 318 | if (ret) { |
319 | DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); | 319 | DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); |
320 | drm_gem_object_unreference_unlocked(obj); | 320 | drm_gem_object_put_unlocked(obj); |
321 | return ret; | 321 | return ret; |
322 | } | 322 | } |
323 | 323 | ||
@@ -352,7 +352,7 @@ unpin: | |||
352 | radeon_bo_unpin(robj); | 352 | radeon_bo_unpin(robj); |
353 | radeon_bo_unreserve(robj); | 353 | radeon_bo_unreserve(robj); |
354 | } | 354 | } |
355 | drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); | 355 | drm_gem_object_put_unlocked(radeon_crtc->cursor_bo); |
356 | } | 356 | } |
357 | 357 | ||
358 | radeon_crtc->cursor_bo = obj; | 358 | radeon_crtc->cursor_bo = obj; |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index ee274c6e374d..ddfe91efa61e 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -267,7 +267,7 @@ static void radeon_unpin_work_func(struct work_struct *__work) | |||
267 | } else | 267 | } else |
268 | DRM_ERROR("failed to reserve buffer after flip\n"); | 268 | DRM_ERROR("failed to reserve buffer after flip\n"); |
269 | 269 | ||
270 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | 270 | drm_gem_object_put_unlocked(&work->old_rbo->gem_base); |
271 | kfree(work); | 271 | kfree(work); |
272 | } | 272 | } |
273 | 273 | ||
@@ -504,7 +504,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, | |||
504 | obj = old_radeon_fb->obj; | 504 | obj = old_radeon_fb->obj; |
505 | 505 | ||
506 | /* take a reference to the old object */ | 506 | /* take a reference to the old object */ |
507 | drm_gem_object_reference(obj); | 507 | drm_gem_object_get(obj); |
508 | work->old_rbo = gem_to_radeon_bo(obj); | 508 | work->old_rbo = gem_to_radeon_bo(obj); |
509 | 509 | ||
510 | new_radeon_fb = to_radeon_framebuffer(fb); | 510 | new_radeon_fb = to_radeon_framebuffer(fb); |
@@ -603,7 +603,7 @@ pflip_cleanup: | |||
603 | radeon_bo_unreserve(new_rbo); | 603 | radeon_bo_unreserve(new_rbo); |
604 | 604 | ||
605 | cleanup: | 605 | cleanup: |
606 | drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); | 606 | drm_gem_object_put_unlocked(&work->old_rbo->gem_base); |
607 | dma_fence_put(work->fence); | 607 | dma_fence_put(work->fence); |
608 | kfree(work); | 608 | kfree(work); |
609 | return r; | 609 | return r; |
@@ -1288,7 +1288,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) | |||
1288 | { | 1288 | { |
1289 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); | 1289 | struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); |
1290 | 1290 | ||
1291 | drm_gem_object_unreference_unlocked(radeon_fb->obj); | 1291 | drm_gem_object_put_unlocked(radeon_fb->obj); |
1292 | drm_framebuffer_cleanup(fb); | 1292 | drm_framebuffer_cleanup(fb); |
1293 | kfree(radeon_fb); | 1293 | kfree(radeon_fb); |
1294 | } | 1294 | } |
@@ -1348,14 +1348,14 @@ radeon_user_framebuffer_create(struct drm_device *dev, | |||
1348 | 1348 | ||
1349 | radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); | 1349 | radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); |
1350 | if (radeon_fb == NULL) { | 1350 | if (radeon_fb == NULL) { |
1351 | drm_gem_object_unreference_unlocked(obj); | 1351 | drm_gem_object_put_unlocked(obj); |
1352 | return ERR_PTR(-ENOMEM); | 1352 | return ERR_PTR(-ENOMEM); |
1353 | } | 1353 | } |
1354 | 1354 | ||
1355 | ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); | 1355 | ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj); |
1356 | if (ret) { | 1356 | if (ret) { |
1357 | kfree(radeon_fb); | 1357 | kfree(radeon_fb); |
1358 | drm_gem_object_unreference_unlocked(obj); | 1358 | drm_gem_object_put_unlocked(obj); |
1359 | return ERR_PTR(ret); | 1359 | return ERR_PTR(ret); |
1360 | } | 1360 | } |
1361 | 1361 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index af6ee7d9b465..fd25361ac681 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -118,7 +118,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) | |||
118 | radeon_bo_unpin(rbo); | 118 | radeon_bo_unpin(rbo); |
119 | radeon_bo_unreserve(rbo); | 119 | radeon_bo_unreserve(rbo); |
120 | } | 120 | } |
121 | drm_gem_object_unreference_unlocked(gobj); | 121 | drm_gem_object_put_unlocked(gobj); |
122 | } | 122 | } |
123 | 123 | ||
124 | static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | 124 | static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, |
@@ -299,7 +299,7 @@ out: | |||
299 | 299 | ||
300 | } | 300 | } |
301 | if (fb && ret) { | 301 | if (fb && ret) { |
302 | drm_gem_object_unreference_unlocked(gobj); | 302 | drm_gem_object_put_unlocked(gobj); |
303 | drm_framebuffer_unregister_private(fb); | 303 | drm_framebuffer_unregister_private(fb); |
304 | drm_framebuffer_cleanup(fb); | 304 | drm_framebuffer_cleanup(fb); |
305 | kfree(fb); | 305 | kfree(fb); |
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 574bf7e6b118..3386452bd2f0 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -271,7 +271,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, | |||
271 | } | 271 | } |
272 | r = drm_gem_handle_create(filp, gobj, &handle); | 272 | r = drm_gem_handle_create(filp, gobj, &handle); |
273 | /* drop reference from allocate - handle holds it now */ | 273 | /* drop reference from allocate - handle holds it now */ |
274 | drm_gem_object_unreference_unlocked(gobj); | 274 | drm_gem_object_put_unlocked(gobj); |
275 | if (r) { | 275 | if (r) { |
276 | up_read(&rdev->exclusive_lock); | 276 | up_read(&rdev->exclusive_lock); |
277 | r = radeon_gem_handle_lockup(rdev, r); | 277 | r = radeon_gem_handle_lockup(rdev, r); |
@@ -352,7 +352,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, | |||
352 | 352 | ||
353 | r = drm_gem_handle_create(filp, gobj, &handle); | 353 | r = drm_gem_handle_create(filp, gobj, &handle); |
354 | /* drop reference from allocate - handle holds it now */ | 354 | /* drop reference from allocate - handle holds it now */ |
355 | drm_gem_object_unreference_unlocked(gobj); | 355 | drm_gem_object_put_unlocked(gobj); |
356 | if (r) | 356 | if (r) |
357 | goto handle_lockup; | 357 | goto handle_lockup; |
358 | 358 | ||
@@ -361,7 +361,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data, | |||
361 | return 0; | 361 | return 0; |
362 | 362 | ||
363 | release_object: | 363 | release_object: |
364 | drm_gem_object_unreference_unlocked(gobj); | 364 | drm_gem_object_put_unlocked(gobj); |
365 | 365 | ||
366 | handle_lockup: | 366 | handle_lockup: |
367 | up_read(&rdev->exclusive_lock); | 367 | up_read(&rdev->exclusive_lock); |
@@ -395,7 +395,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, | |||
395 | 395 | ||
396 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); | 396 | r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); |
397 | 397 | ||
398 | drm_gem_object_unreference_unlocked(gobj); | 398 | drm_gem_object_put_unlocked(gobj); |
399 | up_read(&rdev->exclusive_lock); | 399 | up_read(&rdev->exclusive_lock); |
400 | r = radeon_gem_handle_lockup(robj->rdev, r); | 400 | r = radeon_gem_handle_lockup(robj->rdev, r); |
401 | return r; | 401 | return r; |
@@ -414,11 +414,11 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, | |||
414 | } | 414 | } |
415 | robj = gem_to_radeon_bo(gobj); | 415 | robj = gem_to_radeon_bo(gobj); |
416 | if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { | 416 | if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { |
417 | drm_gem_object_unreference_unlocked(gobj); | 417 | drm_gem_object_put_unlocked(gobj); |
418 | return -EPERM; | 418 | return -EPERM; |
419 | } | 419 | } |
420 | *offset_p = radeon_bo_mmap_offset(robj); | 420 | *offset_p = radeon_bo_mmap_offset(robj); |
421 | drm_gem_object_unreference_unlocked(gobj); | 421 | drm_gem_object_put_unlocked(gobj); |
422 | return 0; | 422 | return 0; |
423 | } | 423 | } |
424 | 424 | ||
@@ -453,7 +453,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, | |||
453 | 453 | ||
454 | cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); | 454 | cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); |
455 | args->domain = radeon_mem_type_to_domain(cur_placement); | 455 | args->domain = radeon_mem_type_to_domain(cur_placement); |
456 | drm_gem_object_unreference_unlocked(gobj); | 456 | drm_gem_object_put_unlocked(gobj); |
457 | return r; | 457 | return r; |
458 | } | 458 | } |
459 | 459 | ||
@@ -485,7 +485,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, | |||
485 | if (rdev->asic->mmio_hdp_flush && | 485 | if (rdev->asic->mmio_hdp_flush && |
486 | radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) | 486 | radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) |
487 | robj->rdev->asic->mmio_hdp_flush(rdev); | 487 | robj->rdev->asic->mmio_hdp_flush(rdev); |
488 | drm_gem_object_unreference_unlocked(gobj); | 488 | drm_gem_object_put_unlocked(gobj); |
489 | r = radeon_gem_handle_lockup(rdev, r); | 489 | r = radeon_gem_handle_lockup(rdev, r); |
490 | return r; | 490 | return r; |
491 | } | 491 | } |
@@ -504,7 +504,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, | |||
504 | return -ENOENT; | 504 | return -ENOENT; |
505 | robj = gem_to_radeon_bo(gobj); | 505 | robj = gem_to_radeon_bo(gobj); |
506 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); | 506 | r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); |
507 | drm_gem_object_unreference_unlocked(gobj); | 507 | drm_gem_object_put_unlocked(gobj); |
508 | return r; | 508 | return r; |
509 | } | 509 | } |
510 | 510 | ||
@@ -527,7 +527,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, | |||
527 | radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); | 527 | radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); |
528 | radeon_bo_unreserve(rbo); | 528 | radeon_bo_unreserve(rbo); |
529 | out: | 529 | out: |
530 | drm_gem_object_unreference_unlocked(gobj); | 530 | drm_gem_object_put_unlocked(gobj); |
531 | return r; | 531 | return r; |
532 | } | 532 | } |
533 | 533 | ||
@@ -661,14 +661,14 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, | |||
661 | r = radeon_bo_reserve(rbo, false); | 661 | r = radeon_bo_reserve(rbo, false); |
662 | if (r) { | 662 | if (r) { |
663 | args->operation = RADEON_VA_RESULT_ERROR; | 663 | args->operation = RADEON_VA_RESULT_ERROR; |
664 | drm_gem_object_unreference_unlocked(gobj); | 664 | drm_gem_object_put_unlocked(gobj); |
665 | return r; | 665 | return r; |
666 | } | 666 | } |
667 | bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); | 667 | bo_va = radeon_vm_bo_find(&fpriv->vm, rbo); |
668 | if (!bo_va) { | 668 | if (!bo_va) { |
669 | args->operation = RADEON_VA_RESULT_ERROR; | 669 | args->operation = RADEON_VA_RESULT_ERROR; |
670 | radeon_bo_unreserve(rbo); | 670 | radeon_bo_unreserve(rbo); |
671 | drm_gem_object_unreference_unlocked(gobj); | 671 | drm_gem_object_put_unlocked(gobj); |
672 | return -ENOENT; | 672 | return -ENOENT; |
673 | } | 673 | } |
674 | 674 | ||
@@ -695,7 +695,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, | |||
695 | args->operation = RADEON_VA_RESULT_ERROR; | 695 | args->operation = RADEON_VA_RESULT_ERROR; |
696 | } | 696 | } |
697 | out: | 697 | out: |
698 | drm_gem_object_unreference_unlocked(gobj); | 698 | drm_gem_object_put_unlocked(gobj); |
699 | return r; | 699 | return r; |
700 | } | 700 | } |
701 | 701 | ||
@@ -736,7 +736,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data, | |||
736 | 736 | ||
737 | radeon_bo_unreserve(robj); | 737 | radeon_bo_unreserve(robj); |
738 | out: | 738 | out: |
739 | drm_gem_object_unreference_unlocked(gobj); | 739 | drm_gem_object_put_unlocked(gobj); |
740 | return r; | 740 | return r; |
741 | } | 741 | } |
742 | 742 | ||
@@ -762,7 +762,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv, | |||
762 | 762 | ||
763 | r = drm_gem_handle_create(file_priv, gobj, &handle); | 763 | r = drm_gem_handle_create(file_priv, gobj, &handle); |
764 | /* drop reference from allocate - handle holds it now */ | 764 | /* drop reference from allocate - handle holds it now */ |
765 | drm_gem_object_unreference_unlocked(gobj); | 765 | drm_gem_object_put_unlocked(gobj); |
766 | if (r) { | 766 | if (r) { |
767 | return r; | 767 | return r; |
768 | } | 768 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 8b722297a05c..093594976126 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c | |||
@@ -445,7 +445,7 @@ void radeon_bo_force_delete(struct radeon_device *rdev) | |||
445 | list_del_init(&bo->list); | 445 | list_del_init(&bo->list); |
446 | mutex_unlock(&bo->rdev->gem.mutex); | 446 | mutex_unlock(&bo->rdev->gem.mutex); |
447 | /* this should unref the ttm bo */ | 447 | /* this should unref the ttm bo */ |
448 | drm_gem_object_unreference_unlocked(&bo->gem_base); | 448 | drm_gem_object_put_unlocked(&bo->gem_base); |
449 | } | 449 | } |
450 | } | 450 | } |
451 | 451 | ||
@@ -546,7 +546,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, | |||
546 | list_for_each_entry(lobj, head, tv.head) { | 546 | list_for_each_entry(lobj, head, tv.head) { |
547 | struct radeon_bo *bo = lobj->robj; | 547 | struct radeon_bo *bo = lobj->robj; |
548 | if (!bo->pin_count) { | 548 | if (!bo->pin_count) { |
549 | u32 domain = lobj->prefered_domains; | 549 | u32 domain = lobj->preferred_domains; |
550 | u32 allowed = lobj->allowed_domains; | 550 | u32 allowed = lobj->allowed_domains; |
551 | u32 current_domain = | 551 | u32 current_domain = |
552 | radeon_mem_type_to_domain(bo->tbo.mem.mem_type); | 552 | radeon_mem_type_to_domain(bo->tbo.mem.mem_type); |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 2804b4a15896..50f60a587648 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -907,17 +907,17 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
907 | 907 | ||
908 | r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, | 908 | r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, |
909 | RADEON_GEM_DOMAIN_VRAM, 0, NULL, | 909 | RADEON_GEM_DOMAIN_VRAM, 0, NULL, |
910 | NULL, &rdev->stollen_vga_memory); | 910 | NULL, &rdev->stolen_vga_memory); |
911 | if (r) { | 911 | if (r) { |
912 | return r; | 912 | return r; |
913 | } | 913 | } |
914 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); | 914 | r = radeon_bo_reserve(rdev->stolen_vga_memory, false); |
915 | if (r) | 915 | if (r) |
916 | return r; | 916 | return r; |
917 | r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); | 917 | r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); |
918 | radeon_bo_unreserve(rdev->stollen_vga_memory); | 918 | radeon_bo_unreserve(rdev->stolen_vga_memory); |
919 | if (r) { | 919 | if (r) { |
920 | radeon_bo_unref(&rdev->stollen_vga_memory); | 920 | radeon_bo_unref(&rdev->stolen_vga_memory); |
921 | return r; | 921 | return r; |
922 | } | 922 | } |
923 | DRM_INFO("radeon: %uM of VRAM memory ready\n", | 923 | DRM_INFO("radeon: %uM of VRAM memory ready\n", |
@@ -946,13 +946,13 @@ void radeon_ttm_fini(struct radeon_device *rdev) | |||
946 | if (!rdev->mman.initialized) | 946 | if (!rdev->mman.initialized) |
947 | return; | 947 | return; |
948 | radeon_ttm_debugfs_fini(rdev); | 948 | radeon_ttm_debugfs_fini(rdev); |
949 | if (rdev->stollen_vga_memory) { | 949 | if (rdev->stolen_vga_memory) { |
950 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); | 950 | r = radeon_bo_reserve(rdev->stolen_vga_memory, false); |
951 | if (r == 0) { | 951 | if (r == 0) { |
952 | radeon_bo_unpin(rdev->stollen_vga_memory); | 952 | radeon_bo_unpin(rdev->stolen_vga_memory); |
953 | radeon_bo_unreserve(rdev->stollen_vga_memory); | 953 | radeon_bo_unreserve(rdev->stolen_vga_memory); |
954 | } | 954 | } |
955 | radeon_bo_unref(&rdev->stollen_vga_memory); | 955 | radeon_bo_unref(&rdev->stolen_vga_memory); |
956 | } | 956 | } |
957 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); | 957 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); |
958 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); | 958 | ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); |
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 5f68245579a3..5e82b408d522 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c | |||
@@ -139,7 +139,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, | |||
139 | 139 | ||
140 | /* add the vm page table to the list */ | 140 | /* add the vm page table to the list */ |
141 | list[0].robj = vm->page_directory; | 141 | list[0].robj = vm->page_directory; |
142 | list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM; | 142 | list[0].preferred_domains = RADEON_GEM_DOMAIN_VRAM; |
143 | list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; | 143 | list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; |
144 | list[0].tv.bo = &vm->page_directory->tbo; | 144 | list[0].tv.bo = &vm->page_directory->tbo; |
145 | list[0].tv.shared = true; | 145 | list[0].tv.shared = true; |
@@ -151,7 +151,7 @@ struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev, | |||
151 | continue; | 151 | continue; |
152 | 152 | ||
153 | list[idx].robj = vm->page_tables[i].bo; | 153 | list[idx].robj = vm->page_tables[i].bo; |
154 | list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM; | 154 | list[idx].preferred_domains = RADEON_GEM_DOMAIN_VRAM; |
155 | list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; | 155 | list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; |
156 | list[idx].tv.bo = &list[idx].robj->tbo; | 156 | list[idx].tv.bo = &list[idx].robj->tbo; |
157 | list[idx].tv.shared = true; | 157 | list[idx].tv.shared = true; |
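The remaining radeon hunks are a mechanical rename from drm_gem_object_reference()/drm_gem_object_unreference_unlocked() to the drm_gem_object_get()/drm_gem_object_put_unlocked() helpers; the refcounting semantics are unchanged. The typical ioctl-side pattern the rename touches, sketched with toy stand-ins for the DRM helpers:

    struct drm_gem_object { int refcount; };

    /* toy stand-ins for the drm_*{get,put} helpers this series adopts */
    static void gem_get(struct drm_gem_object *obj)
    {
            obj->refcount++;
    }

    static void gem_put_unlocked(struct drm_gem_object *obj)
    {
            obj->refcount--;  /* at zero, the real helper frees the BO */
    }

    static int publish_handle(struct drm_gem_object *obj)
    {
            gem_get(obj);           /* the handle takes its own reference */
            gem_put_unlocked(obj);  /* drop the allocation reference */
            return 0;
    }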